/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmlock.c
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static struct kmem_cache *dlm_lock_cache = NULL;

static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;

static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);

int dlm_init_lock_cache(void)
{
	dlm_lock_cache = kmem_cache_create("o2dlm_lock",
					   sizeof(struct dlm_lock),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (dlm_lock_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_lock_cache(void)
{
	if (dlm_lock_cache)
		kmem_cache_destroy(dlm_lock_cache);
}

/* Tell us whether we can grant a new lock request.
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         none
 *   held on exit:  none
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
				  struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	return 1;
}

/* performs lock creation at the lockres master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status = DLM_NORMAL;

	mlog_entry("type=%d\n", lock->ml.type);

	spin_lock(&res->spinlock);
	/* if called from dlm_create_lock_handler, need to
	 * ensure it will not sleep in dlm_wait_on_lockres */
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL &&
	    lock->ml.node != dlm->node_num) {
		/* erf.  state changed after lock was dropped. */
		spin_unlock(&res->spinlock);
		dlm_error(status);
		return status;
	}
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);

	if (dlm_can_grant_new_lock(res, lock)) {
		mlog(0, "I can grant this lock right away\n");
		/* got it right away */
		lock->lksb->status = DLM_NORMAL;
		status = DLM_NORMAL;
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);

		/* for the recovery lock, we can't allow the ast
		 * to be queued since the dlmthread is already
		 * frozen.  but the recovery lock is always locked
		 * with LKM_NOQUEUE so we do not need the ast in
		 * this special case */
		if (!dlm_is_recovery_lock(res->lockname.name,
					  res->lockname.len)) {
			kick_thread = 1;
			call_ast = 1;
		} else {
			mlog(0, "%s: returning DLM_NORMAL to "
			     "node %u for reco lock\n", dlm->name,
			     lock->ml.node);
		}
	} else {
		/* for NOQUEUE request, unless we get the
		 * lock right away, return DLM_NOTQUEUED */
		if (flags & LKM_NOQUEUE) {
			status = DLM_NOTQUEUED;
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				mlog(0, "%s: returning NOTQUEUED to "
				     "node %u for reco lock\n", dlm->name,
				     lock->ml.node);
			}
		} else {
			dlm_lock_get(lock);
			list_add_tail(&lock->list, &res->blocked);
			kick_thread = 1;
		}
	}
	/* reduce the inflight count, this may result in the lockres
	 * being purged below during calc_usage */
	if (lock->ml.node == dlm->node_num)
		dlm_lockres_drop_inflight_ref(dlm, res);

	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	dlm_lockres_calc_usage(dlm, res);
	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock)
{
	/* remove from local queue if it failed */
	list_del_init(&lock->list);
	lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}


/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	enum dlm_status status = DLM_DENIED;
	int lockres_changed = 1;

	mlog_entry("type=%d\n", lock->ml.type);
	mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
	     res->lockname.name, flags);

	spin_lock(&res->spinlock);

	/* will exit this call with spinlock held */
	__dlm_wait_on_lockres(res);
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	/* add lock to local (secondary) queue */
	dlm_lock_get(lock);
	list_add_tail(&lock->list, &res->blocked);
	lock->lock_pending = 1;
	spin_unlock(&res->spinlock);

	/* spec seems to say that you will get DLM_NORMAL when the lock
	 * has been queued, meaning we need to wait for a reply here. */
	status = dlm_send_remote_lock_request(dlm, res, lock, flags);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->lock_pending = 0;
	if (status != DLM_NORMAL) {
		if (status == DLM_RECOVERING &&
		    dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			/* recovery lock was mastered by dead node.
			 * we need to have calc_usage shoot down this
			 * lockres and completely remaster it. */
			mlog(0, "%s: recovery lock was owned by "
			     "dead node %u, remaster it now.\n",
			     dlm->name, res->owner);
		} else if (status != DLM_NOTQUEUED) {
			/*
			 * DO NOT call calc_usage, as this would unhash
			 * the remote lockres before we ever get to use
			 * it.  treat as if we never made any change to
			 * the lockres.
			 */
			lockres_changed = 0;
			dlm_error(status);
		}
		dlm_revert_pending_lock(res, lock);
		dlm_lock_put(lock);
	} else if (dlm_is_recovery_lock(res->lockname.name,
					res->lockname.len)) {
		/* special case for the $RECOVERY lock.
		 * there will never be an AST delivered to put
		 * this lock on the proper secondary queue
		 * (granted), so do it manually. */
		mlog(0, "%s: $RECOVERY lock for this node (%u) is "
		     "mastered by %u; got lock, manually granting (no ast)\n",
		     dlm->name, dlm->node_num, res->owner);
		list_move_tail(&lock->list, &res->granted);
	}
	spin_unlock(&res->spinlock);

	if (lockres_changed)
		dlm_lockres_calc_usage(dlm, res);

	wake_up(&res->wq);
	return status;
}


/* for remote lock creation.
 * locking:
 *   caller needs:  none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock, int flags)
{
	struct dlm_create_lock create;
	int tmpret, status = 0;
	enum dlm_status ret;

	mlog_entry_void();

	memset(&create, 0, sizeof(create));
	create.node_idx = dlm->node_num;
	create.requested_type = lock->ml.type;
	create.cookie = lock->ml.cookie;
	create.namelen = res->lockname.len;
	create.flags = cpu_to_be32(flags);
	memcpy(create.name, res->lockname.name, create.namelen);

	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
				    sizeof(create), res->owner, &status);
	if (tmpret >= 0) {
		/* successfully sent and received */
		ret = status;  /* this is already a dlm_status */
		if (ret == DLM_REJECTED) {
			mlog(ML_ERROR, "%s:%.*s: BUG.  this is a stale lockres "
			     "no longer owned by %u.  that node is coming back "
			     "up currently.\n", dlm->name, create.namelen,
			     create.name, res->owner);
			dlm_print_one_lock_resource(res);
			BUG();
		}
	} else {
		mlog_errno(tmpret);
		if (dlm_is_host_down(tmpret)) {
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from lock message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

void dlm_lock_get(struct dlm_lock *lock)
{
	kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
	kref_put(&lock->lock_refs, dlm_lock_release);
}

static void dlm_lock_release(struct kref *kref)
{
	struct dlm_lock *lock;

	lock = container_of(kref, struct dlm_lock, lock_refs);

	BUG_ON(!list_empty(&lock->list));
	BUG_ON(!list_empty(&lock->ast_list));
	BUG_ON(!list_empty(&lock->bast_list));
	BUG_ON(lock->ast_pending);
	BUG_ON(lock->bast_pending);

	dlm_lock_detach_lockres(lock);

	if (lock->lksb_kernel_allocated) {
		mlog(0, "freeing kernel-allocated lksb\n");
		kfree(lock->lksb);
	}
	kmem_cache_free(dlm_lock_cache, lock);
}

/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res)
{
	dlm_lockres_get(res);
	lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	res = lock->lockres;
	if (res) {
		lock->lockres = NULL;
		mlog(0, "removing lock's lockres reference\n");
		dlm_lockres_put(res);
	}
}

static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie)
{
	INIT_LIST_HEAD(&newlock->list);
	INIT_LIST_HEAD(&newlock->ast_list);
	INIT_LIST_HEAD(&newlock->bast_list);
	spin_lock_init(&newlock->spinlock);
	newlock->ml.type = type;
	newlock->ml.convert_type = LKM_IVMODE;
	newlock->ml.highest_blocked = LKM_IVMODE;
	newlock->ml.node = node;
	newlock->ml.pad1 = 0;
	newlock->ml.list = 0;
	newlock->ml.flags = 0;
	newlock->ast = NULL;
	newlock->bast = NULL;
	newlock->astdata = NULL;
	newlock->ml.cookie = cpu_to_be64(cookie);
	newlock->ast_pending = 0;
	newlock->bast_pending = 0;
	newlock->convert_pending = 0;
	newlock->lock_pending = 0;
	newlock->unlock_pending = 0;
	newlock->cancel_pending = 0;
	newlock->lksb_kernel_allocated = 0;

	kref_init(&newlock->lock_refs);
}

struct dlm_lock *dlm_new_lock(int type, u8 node, u64 cookie,
			      struct dlm_lockstatus *lksb)
{
	struct dlm_lock *lock;
	int kernel_allocated = 0;

	lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
	if (!lock)
		return NULL;

	if (!lksb) {
		/* zero memory only if kernel-allocated */
		lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
		if (!lksb) {
			/* lock came from dlm_lock_cache, not kmalloc */
			kmem_cache_free(dlm_lock_cache, lock);
			return NULL;
		}
		kernel_allocated = 1;
	}

	dlm_init_lock(lock, type, node, cookie);
	if (kernel_allocated)
		lock->lksb_kernel_allocated = 1;
	lock->lksb = lksb;
	lksb->lockid = lock;
	return lock;
}

/* handler for lock creation net message
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	enum dlm_status status = DLM_NORMAL;
	char *name;
	unsigned int namelen;

	BUG_ON(!dlm);

	mlog_entry_void();

	if (!dlm_grab(dlm))
		return DLM_REJECTED;

	name = create->name;
	namelen = create->namelen;
	status = DLM_REJECTED;
	if (!dlm_domain_fully_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
		     "sending a create_lock message for lock %.*s!\n",
		     dlm->name, create->node_idx, namelen, name);
		dlm_error(status);
		goto leave;
	}

	status = DLM_IVBUFLEN;
	if (namelen > DLM_LOCKID_NAME_MAX) {
		dlm_error(status);
		goto leave;
	}

	status = DLM_SYSERR;
	newlock = dlm_new_lock(create->requested_type,
			       create->node_idx,
			       be64_to_cpu(create->cookie), NULL);
	if (!newlock) {
		dlm_error(status);
		goto leave;
	}

	lksb = newlock->lksb;

	if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
		lksb->flags |= DLM_LKSB_GET_LVB;
		mlog(0, "set DLM_LKSB_GET_LVB flag\n");
	}

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, name, namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	spin_unlock(&res->spinlock);

	if (status != DLM_NORMAL) {
		mlog(0, "lockres recovering/migrating/in-progress\n");
		goto leave;
	}

	dlm_lock_attach_lockres(newlock, res);

	status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
	if (status != DLM_NORMAL)
		if (newlock)
			dlm_lock_put(newlock);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}


/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
	u64 tmpnode = node_num;

	/* shift single byte of node num into top 8 bits */
	tmpnode <<= 56;

	spin_lock(&dlm_cookie_lock);
	*cookie = (dlm_next_cookie | tmpnode);
	if (++dlm_next_cookie & 0xff00000000000000ull) {
		mlog(0, "This node's cookie will now wrap!\n");
		dlm_next_cookie = 1;
	}
	spin_unlock(&dlm_cookie_lock);
}

enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
			struct dlm_lockstatus *lksb, int flags,
			const char *name, int namelen, dlm_astlockfunc_t *ast,
			void *data, dlm_bastlockfunc_t *bast)
{
	enum dlm_status status;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	int convert = 0, recovery = 0;

	/* yes this function is a mess.
	 * TODO: clean this up.  lots of common code in the
	 * lock and convert paths, especially in the retry blocks */
	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	status = DLM_BADPARAM;
	if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
		dlm_error(status);
		goto error;
	}

	if (flags & ~LKM_VALID_FLAGS) {
		dlm_error(status);
		goto error;
	}

	convert = (flags & LKM_CONVERT);
	recovery = (flags & LKM_RECOVERY);

	if (recovery &&
	    (!dlm_is_recovery_lock(name, namelen) || convert)) {
		dlm_error(status);
		goto error;
	}
	if (convert && (flags & LKM_LOCAL)) {
		mlog(ML_ERROR, "strange LOCAL convert request!\n");
		goto error;
	}

	if (convert) {
		/* CONVERT request */

		/* if converting, must pass in a valid dlm_lock */
		lock = lksb->lockid;
		if (!lock) {
			mlog(ML_ERROR, "NULL lock pointer in convert "
			     "request\n");
			goto error;
		}

		res = lock->lockres;
		if (!res) {
			mlog(ML_ERROR, "NULL lockres pointer in convert "
			     "request\n");
			goto error;
		}
		dlm_lockres_get(res);

		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
		 * static after the original lock call.  convert requests will
		 * ensure that everything is the same, or return DLM_BADARGS.
		 * this means that DLM_DENIED_NOASTS will never be returned.
		 */
		if (lock->lksb != lksb || lock->ast != ast ||
		    lock->bast != bast || lock->astdata != data) {
			status = DLM_BADARGS;
			mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lksb, ast, bast, data);
			mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lock->lksb, lock->ast,
			     lock->bast, lock->astdata);
			goto error;
		}
retry_convert:
		dlm_wait_for_recovery(dlm);

		if (res->owner == dlm->node_num)
			status = dlmconvert_master(dlm, res, lock, flags, mode);
		else
			status = dlmconvert_remote(dlm, res, lock, flags, mode);
		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			/* for now, see how this works without sleeping
			 * and just retry right away.  I suspect the reco
			 * or migration will complete fast enough that
			 * no waiting will be necessary */
			mlog(0, "retrying convert with migration/recovery/"
			     "in-progress\n");
			msleep(100);
			goto retry_convert;
		}
	} else {
		u64 tmpcookie;

		/* LOCK request */
		status = DLM_BADARGS;
		if (!name) {
			dlm_error(status);
			goto error;
		}

		status = DLM_IVBUFLEN;
		if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
			dlm_error(status);
			goto error;
		}

		dlm_get_next_cookie(dlm->node_num, &tmpcookie);
		lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
		if (!lock) {
			dlm_error(status);
			goto error;
		}

		if (!recovery)
			dlm_wait_for_recovery(dlm);

		/* find or create the lock resource */
		res = dlm_get_lock_resource(dlm, name, namelen, flags);
		if (!res) {
			status = DLM_IVLOCKID;
			dlm_error(status);
			goto error;
		}

		mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
		mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

		dlm_lock_attach_lockres(lock, res);
		lock->ast = ast;
		lock->bast = bast;
		lock->astdata = data;

retry_lock:
		if (flags & LKM_VALBLK) {
			mlog(0, "LKM_VALBLK passed by caller\n");

			/* LVB requests for non PR, PW or EX locks are
			 * ignored. */
			if (mode < LKM_PRMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}

		if (res->owner == dlm->node_num)
			status = dlmlock_master(dlm, res, lock, flags);
		else
			status = dlmlock_remote(dlm, res, lock, flags);

		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			mlog(0, "retrying lock with migration/"
			     "recovery/in progress\n");
			msleep(100);
			/* no waiting for dlm_reco_thread */
			if (recovery) {
				if (status != DLM_RECOVERING)
					goto retry_lock;

				mlog(0, "%s: got RECOVERING "
				     "for $RECOVERY lock, master "
				     "was %u\n", dlm->name,
				     res->owner);
				/* wait to see the node go down, then
				 * drop down and allow the lockres to
				 * get cleaned up.  need to remaster. */
				dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			} else {
				dlm_wait_for_recovery(dlm);
				goto retry_lock;
			}
		}

		if (status != DLM_NORMAL) {
			lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
			if (status != DLM_NOTQUEUED)
				dlm_error(status);
			goto error;
		}
	}

error:
	if (status != DLM_NORMAL) {
		if (lock && !convert)
			dlm_lock_put(lock);
		/* this is kind of unnecessary */
		lksb->status = status;
	}

	/* put lockres ref from the convert path
	 * or from dlm_get_lock_resource */
	if (res)
		dlm_lockres_put(res);

	return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
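
/*
 * Illustrative sketch only: roughly how a caller might request a new
 * exclusive lock through the dlmlock() entry point above.  The "dlm"
 * domain pointer, the lksb, the my_ast/my_bast callbacks, my_astdata
 * and the lock name are hypothetical caller-side values, not part of
 * this file.  dlmlock() returns DLM_NORMAL once the request has been
 * granted or queued; the grant itself is signalled asynchronously
 * through the ast callback, and lksb.status carries the final status.
 *
 *	enum dlm_status st;
 *
 *	st = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_VALBLK,
 *		     "my_lock", strlen("my_lock"),
 *		     my_ast, my_astdata, my_bast);
 *	if (st != DLM_NORMAL)
 *		handle_lock_failure(st);   (hypothetical error path)
 */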