1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmmod.c 5 * 6 * standalone DLM module 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/init.h> 34 #include <linux/sysctl.h> 35 #include <linux/random.h> 36 #include <linux/blkdev.h> 37 #include <linux/socket.h> 38 #include <linux/inet.h> 39 #include <linux/spinlock.h> 40 #include <linux/delay.h> 41 42 43 #include "cluster/heartbeat.h" 44 #include "cluster/nodemanager.h" 45 #include "cluster/tcp.h" 46 47 #include "dlmapi.h" 48 #include "dlmcommon.h" 49 #include "dlmdomain.h" 50 #include "dlmdebug.h" 51 52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) 53 #include "cluster/masklog.h" 54 55 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 56 struct dlm_master_list_entry *mle, 57 struct o2nm_node *node, 58 int idx); 59 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 60 struct dlm_master_list_entry *mle, 61 struct o2nm_node *node, 62 int idx); 63 64 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); 65 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 66 struct dlm_lock_resource *res, 67 void *nodemap, u32 flags); 68 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); 69 70 static inline int dlm_mle_equal(struct dlm_ctxt *dlm, 71 struct dlm_master_list_entry *mle, 72 const char *name, 73 unsigned int namelen) 74 { 75 if (dlm != mle->dlm) 76 return 0; 77 78 if (namelen != mle->mnamelen || 79 memcmp(name, mle->mname, namelen) != 0) 80 return 0; 81 82 return 1; 83 } 84 85 static struct kmem_cache *dlm_lockres_cache; 86 static struct kmem_cache *dlm_lockname_cache; 87 static struct kmem_cache *dlm_mle_cache; 88 89 static void dlm_mle_release(struct kref *kref); 90 static void dlm_init_mle(struct dlm_master_list_entry *mle, 91 enum dlm_mle_type type, 92 struct dlm_ctxt *dlm, 93 struct dlm_lock_resource *res, 94 const char *name, 95 unsigned int namelen); 96 static void dlm_put_mle(struct dlm_master_list_entry *mle); 97 static void __dlm_put_mle(struct dlm_master_list_entry *mle); 98 static int dlm_find_mle(struct dlm_ctxt *dlm, 99 struct dlm_master_list_entry **mle, 100 char *name, unsigned int namelen); 101 102 static int dlm_do_master_request(struct dlm_lock_resource *res, 103 struct dlm_master_list_entry *mle, int to); 104 105 106 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 107 struct dlm_lock_resource *res, 108 struct dlm_master_list_entry *mle, 109 int *blocked); 110 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 111 struct dlm_lock_resource *res, 112 struct dlm_master_list_entry *mle, 113 int blocked); 114 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 115 struct dlm_lock_resource *res, 116 struct dlm_master_list_entry *mle, 117 struct dlm_master_list_entry **oldmle, 118 const char *name, unsigned int namelen, 119 u8 new_master, u8 master); 120 121 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 122 struct dlm_lock_resource *res); 123 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 124 struct dlm_lock_resource *res); 125 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 126 struct dlm_lock_resource *res, 127 u8 target); 128 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 129 struct dlm_lock_resource *res); 130 131 132 int dlm_is_host_down(int errno) 133 { 134 switch (errno) { 135 case -EBADF: 136 case -ECONNREFUSED: 137 case -ENOTCONN: 138 case -ECONNRESET: 139 case -EPIPE: 140 case -EHOSTDOWN: 141 case -EHOSTUNREACH: 142 case -ETIMEDOUT: 143 case -ECONNABORTED: 144 case -ENETDOWN: 145 case -ENETUNREACH: 146 case -ENETRESET: 147 case -ESHUTDOWN: 148 case -ENOPROTOOPT: 149 case -EINVAL: /* if returned from our tcp code, 150 this means there is no socket */ 151 return 1; 152 } 153 return 0; 154 } 155 156 157 /* 158 * MASTER LIST FUNCTIONS 159 */ 160 161 162 /* 163 * regarding master list entries and heartbeat callbacks: 164 * 165 * in order to avoid sleeping and allocation that occurs in 166 * heartbeat, master list entries are simply attached to the 167 * dlm's established heartbeat callbacks. the mle is attached 168 * when it is created, and since the dlm->spinlock is held at 169 * that time, any heartbeat event will be properly discovered 170 * by the mle. the mle needs to be detached from the 171 * dlm->mle_hb_events list as soon as heartbeat events are no 172 * longer useful to the mle, and before the mle is freed. 173 * 174 * as a general rule, heartbeat events are no longer needed by 175 * the mle once an "answer" regarding the lock master has been 176 * received. 177 */ 178 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, 179 struct dlm_master_list_entry *mle) 180 { 181 assert_spin_locked(&dlm->spinlock); 182 183 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); 184 } 185 186 187 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 188 struct dlm_master_list_entry *mle) 189 { 190 if (!list_empty(&mle->hb_events)) 191 list_del_init(&mle->hb_events); 192 } 193 194 195 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 196 struct dlm_master_list_entry *mle) 197 { 198 spin_lock(&dlm->spinlock); 199 __dlm_mle_detach_hb_events(dlm, mle); 200 spin_unlock(&dlm->spinlock); 201 } 202 203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) 204 { 205 struct dlm_ctxt *dlm; 206 dlm = mle->dlm; 207 208 assert_spin_locked(&dlm->spinlock); 209 assert_spin_locked(&dlm->master_lock); 210 mle->inuse++; 211 kref_get(&mle->mle_refs); 212 } 213 214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) 215 { 216 struct dlm_ctxt *dlm; 217 dlm = mle->dlm; 218 219 spin_lock(&dlm->spinlock); 220 spin_lock(&dlm->master_lock); 221 mle->inuse--; 222 __dlm_put_mle(mle); 223 spin_unlock(&dlm->master_lock); 224 spin_unlock(&dlm->spinlock); 225 226 } 227 228 /* remove from list and free */ 229 static void __dlm_put_mle(struct dlm_master_list_entry *mle) 230 { 231 struct dlm_ctxt *dlm; 232 dlm = mle->dlm; 233 234 assert_spin_locked(&dlm->spinlock); 235 assert_spin_locked(&dlm->master_lock); 236 if (!atomic_read(&mle->mle_refs.refcount)) { 237 /* this may or may not crash, but who cares. 238 * it's a BUG. */ 239 mlog(ML_ERROR, "bad mle: %p\n", mle); 240 dlm_print_one_mle(mle); 241 BUG(); 242 } else 243 kref_put(&mle->mle_refs, dlm_mle_release); 244 } 245 246 247 /* must not have any spinlocks coming in */ 248 static void dlm_put_mle(struct dlm_master_list_entry *mle) 249 { 250 struct dlm_ctxt *dlm; 251 dlm = mle->dlm; 252 253 spin_lock(&dlm->spinlock); 254 spin_lock(&dlm->master_lock); 255 __dlm_put_mle(mle); 256 spin_unlock(&dlm->master_lock); 257 spin_unlock(&dlm->spinlock); 258 } 259 260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) 261 { 262 kref_get(&mle->mle_refs); 263 } 264 265 static void dlm_init_mle(struct dlm_master_list_entry *mle, 266 enum dlm_mle_type type, 267 struct dlm_ctxt *dlm, 268 struct dlm_lock_resource *res, 269 const char *name, 270 unsigned int namelen) 271 { 272 assert_spin_locked(&dlm->spinlock); 273 274 mle->dlm = dlm; 275 mle->type = type; 276 INIT_HLIST_NODE(&mle->master_hash_node); 277 INIT_LIST_HEAD(&mle->hb_events); 278 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 279 spin_lock_init(&mle->spinlock); 280 init_waitqueue_head(&mle->wq); 281 atomic_set(&mle->woken, 0); 282 kref_init(&mle->mle_refs); 283 memset(mle->response_map, 0, sizeof(mle->response_map)); 284 mle->master = O2NM_MAX_NODES; 285 mle->new_master = O2NM_MAX_NODES; 286 mle->inuse = 0; 287 288 BUG_ON(mle->type != DLM_MLE_BLOCK && 289 mle->type != DLM_MLE_MASTER && 290 mle->type != DLM_MLE_MIGRATION); 291 292 if (mle->type == DLM_MLE_MASTER) { 293 BUG_ON(!res); 294 mle->mleres = res; 295 memcpy(mle->mname, res->lockname.name, res->lockname.len); 296 mle->mnamelen = res->lockname.len; 297 mle->mnamehash = res->lockname.hash; 298 } else { 299 BUG_ON(!name); 300 mle->mleres = NULL; 301 memcpy(mle->mname, name, namelen); 302 mle->mnamelen = namelen; 303 mle->mnamehash = dlm_lockid_hash(name, namelen); 304 } 305 306 atomic_inc(&dlm->mle_tot_count[mle->type]); 307 atomic_inc(&dlm->mle_cur_count[mle->type]); 308 309 /* copy off the node_map and register hb callbacks on our copy */ 310 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); 311 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); 312 clear_bit(dlm->node_num, mle->vote_map); 313 clear_bit(dlm->node_num, mle->node_map); 314 315 /* attach the mle to the domain node up/down events */ 316 __dlm_mle_attach_hb_events(dlm, mle); 317 } 318 319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) 320 { 321 assert_spin_locked(&dlm->spinlock); 322 assert_spin_locked(&dlm->master_lock); 323 324 if (!hlist_unhashed(&mle->master_hash_node)) 325 hlist_del_init(&mle->master_hash_node); 326 } 327 328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) 329 { 330 struct hlist_head *bucket; 331 332 assert_spin_locked(&dlm->master_lock); 333 334 bucket = dlm_master_hash(dlm, mle->mnamehash); 335 hlist_add_head(&mle->master_hash_node, bucket); 336 } 337 338 /* returns 1 if found, 0 if not */ 339 static int dlm_find_mle(struct dlm_ctxt *dlm, 340 struct dlm_master_list_entry **mle, 341 char *name, unsigned int namelen) 342 { 343 struct dlm_master_list_entry *tmpmle; 344 struct hlist_head *bucket; 345 unsigned int hash; 346 347 assert_spin_locked(&dlm->master_lock); 348 349 hash = dlm_lockid_hash(name, namelen); 350 bucket = dlm_master_hash(dlm, hash); 351 hlist_for_each_entry(tmpmle, bucket, master_hash_node) { 352 if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) 353 continue; 354 dlm_get_mle(tmpmle); 355 *mle = tmpmle; 356 return 1; 357 } 358 return 0; 359 } 360 361 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) 362 { 363 struct dlm_master_list_entry *mle; 364 365 assert_spin_locked(&dlm->spinlock); 366 367 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 368 if (node_up) 369 dlm_mle_node_up(dlm, mle, NULL, idx); 370 else 371 dlm_mle_node_down(dlm, mle, NULL, idx); 372 } 373 } 374 375 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 376 struct dlm_master_list_entry *mle, 377 struct o2nm_node *node, int idx) 378 { 379 spin_lock(&mle->spinlock); 380 381 if (!test_bit(idx, mle->node_map)) 382 mlog(0, "node %u already removed from nodemap!\n", idx); 383 else 384 clear_bit(idx, mle->node_map); 385 386 spin_unlock(&mle->spinlock); 387 } 388 389 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 390 struct dlm_master_list_entry *mle, 391 struct o2nm_node *node, int idx) 392 { 393 spin_lock(&mle->spinlock); 394 395 if (test_bit(idx, mle->node_map)) 396 mlog(0, "node %u already in node map!\n", idx); 397 else 398 set_bit(idx, mle->node_map); 399 400 spin_unlock(&mle->spinlock); 401 } 402 403 404 int dlm_init_mle_cache(void) 405 { 406 dlm_mle_cache = kmem_cache_create("o2dlm_mle", 407 sizeof(struct dlm_master_list_entry), 408 0, SLAB_HWCACHE_ALIGN, 409 NULL); 410 if (dlm_mle_cache == NULL) 411 return -ENOMEM; 412 return 0; 413 } 414 415 void dlm_destroy_mle_cache(void) 416 { 417 if (dlm_mle_cache) 418 kmem_cache_destroy(dlm_mle_cache); 419 } 420 421 static void dlm_mle_release(struct kref *kref) 422 { 423 struct dlm_master_list_entry *mle; 424 struct dlm_ctxt *dlm; 425 426 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 427 dlm = mle->dlm; 428 429 assert_spin_locked(&dlm->spinlock); 430 assert_spin_locked(&dlm->master_lock); 431 432 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, 433 mle->type); 434 435 /* remove from list if not already */ 436 __dlm_unlink_mle(dlm, mle); 437 438 /* detach the mle from the domain node up/down events */ 439 __dlm_mle_detach_hb_events(dlm, mle); 440 441 atomic_dec(&dlm->mle_cur_count[mle->type]); 442 443 /* NOTE: kfree under spinlock here. 444 * if this is bad, we can move this to a freelist. */ 445 kmem_cache_free(dlm_mle_cache, mle); 446 } 447 448 449 /* 450 * LOCK RESOURCE FUNCTIONS 451 */ 452 453 int dlm_init_master_caches(void) 454 { 455 dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", 456 sizeof(struct dlm_lock_resource), 457 0, SLAB_HWCACHE_ALIGN, NULL); 458 if (!dlm_lockres_cache) 459 goto bail; 460 461 dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", 462 DLM_LOCKID_NAME_MAX, 0, 463 SLAB_HWCACHE_ALIGN, NULL); 464 if (!dlm_lockname_cache) 465 goto bail; 466 467 return 0; 468 bail: 469 dlm_destroy_master_caches(); 470 return -ENOMEM; 471 } 472 473 void dlm_destroy_master_caches(void) 474 { 475 if (dlm_lockname_cache) { 476 kmem_cache_destroy(dlm_lockname_cache); 477 dlm_lockname_cache = NULL; 478 } 479 480 if (dlm_lockres_cache) { 481 kmem_cache_destroy(dlm_lockres_cache); 482 dlm_lockres_cache = NULL; 483 } 484 } 485 486 static void dlm_lockres_release(struct kref *kref) 487 { 488 struct dlm_lock_resource *res; 489 struct dlm_ctxt *dlm; 490 491 res = container_of(kref, struct dlm_lock_resource, refs); 492 dlm = res->dlm; 493 494 /* This should not happen -- all lockres' have a name 495 * associated with them at init time. */ 496 BUG_ON(!res->lockname.name); 497 498 mlog(0, "destroying lockres %.*s\n", res->lockname.len, 499 res->lockname.name); 500 501 atomic_dec(&dlm->res_cur_count); 502 503 if (!hlist_unhashed(&res->hash_node) || 504 !list_empty(&res->granted) || 505 !list_empty(&res->converting) || 506 !list_empty(&res->blocked) || 507 !list_empty(&res->dirty) || 508 !list_empty(&res->recovering) || 509 !list_empty(&res->purge)) { 510 mlog(ML_ERROR, 511 "Going to BUG for resource %.*s." 512 " We're on a list! [%c%c%c%c%c%c%c]\n", 513 res->lockname.len, res->lockname.name, 514 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', 515 !list_empty(&res->granted) ? 'G' : ' ', 516 !list_empty(&res->converting) ? 'C' : ' ', 517 !list_empty(&res->blocked) ? 'B' : ' ', 518 !list_empty(&res->dirty) ? 'D' : ' ', 519 !list_empty(&res->recovering) ? 'R' : ' ', 520 !list_empty(&res->purge) ? 'P' : ' '); 521 522 dlm_print_one_lock_resource(res); 523 } 524 525 /* By the time we're ready to blow this guy away, we shouldn't 526 * be on any lists. */ 527 BUG_ON(!hlist_unhashed(&res->hash_node)); 528 BUG_ON(!list_empty(&res->granted)); 529 BUG_ON(!list_empty(&res->converting)); 530 BUG_ON(!list_empty(&res->blocked)); 531 BUG_ON(!list_empty(&res->dirty)); 532 BUG_ON(!list_empty(&res->recovering)); 533 BUG_ON(!list_empty(&res->purge)); 534 535 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); 536 537 kmem_cache_free(dlm_lockres_cache, res); 538 } 539 540 void dlm_lockres_put(struct dlm_lock_resource *res) 541 { 542 kref_put(&res->refs, dlm_lockres_release); 543 } 544 545 static void dlm_init_lockres(struct dlm_ctxt *dlm, 546 struct dlm_lock_resource *res, 547 const char *name, unsigned int namelen) 548 { 549 char *qname; 550 551 /* If we memset here, we lose our reference to the kmalloc'd 552 * res->lockname.name, so be sure to init every field 553 * correctly! */ 554 555 qname = (char *) res->lockname.name; 556 memcpy(qname, name, namelen); 557 558 res->lockname.len = namelen; 559 res->lockname.hash = dlm_lockid_hash(name, namelen); 560 561 init_waitqueue_head(&res->wq); 562 spin_lock_init(&res->spinlock); 563 INIT_HLIST_NODE(&res->hash_node); 564 INIT_LIST_HEAD(&res->granted); 565 INIT_LIST_HEAD(&res->converting); 566 INIT_LIST_HEAD(&res->blocked); 567 INIT_LIST_HEAD(&res->dirty); 568 INIT_LIST_HEAD(&res->recovering); 569 INIT_LIST_HEAD(&res->purge); 570 INIT_LIST_HEAD(&res->tracking); 571 atomic_set(&res->asts_reserved, 0); 572 res->migration_pending = 0; 573 res->inflight_locks = 0; 574 res->inflight_assert_workers = 0; 575 576 res->dlm = dlm; 577 578 kref_init(&res->refs); 579 580 atomic_inc(&dlm->res_tot_count); 581 atomic_inc(&dlm->res_cur_count); 582 583 /* just for consistency */ 584 spin_lock(&res->spinlock); 585 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 586 spin_unlock(&res->spinlock); 587 588 res->state = DLM_LOCK_RES_IN_PROGRESS; 589 590 res->last_used = 0; 591 592 spin_lock(&dlm->spinlock); 593 list_add_tail(&res->tracking, &dlm->tracking_list); 594 spin_unlock(&dlm->spinlock); 595 596 memset(res->lvb, 0, DLM_LVB_LEN); 597 memset(res->refmap, 0, sizeof(res->refmap)); 598 } 599 600 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, 601 const char *name, 602 unsigned int namelen) 603 { 604 struct dlm_lock_resource *res = NULL; 605 606 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); 607 if (!res) 608 goto error; 609 610 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); 611 if (!res->lockname.name) 612 goto error; 613 614 dlm_init_lockres(dlm, res, name, namelen); 615 return res; 616 617 error: 618 if (res) 619 kmem_cache_free(dlm_lockres_cache, res); 620 return NULL; 621 } 622 623 void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, 624 struct dlm_lock_resource *res, int bit) 625 { 626 assert_spin_locked(&res->spinlock); 627 628 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, 629 res->lockname.name, bit, __builtin_return_address(0)); 630 631 set_bit(bit, res->refmap); 632 } 633 634 void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, 635 struct dlm_lock_resource *res, int bit) 636 { 637 assert_spin_locked(&res->spinlock); 638 639 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, 640 res->lockname.name, bit, __builtin_return_address(0)); 641 642 clear_bit(bit, res->refmap); 643 } 644 645 static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 646 struct dlm_lock_resource *res) 647 { 648 res->inflight_locks++; 649 650 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, 651 res->lockname.len, res->lockname.name, res->inflight_locks, 652 __builtin_return_address(0)); 653 } 654 655 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 656 struct dlm_lock_resource *res) 657 { 658 assert_spin_locked(&res->spinlock); 659 __dlm_lockres_grab_inflight_ref(dlm, res); 660 } 661 662 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 663 struct dlm_lock_resource *res) 664 { 665 assert_spin_locked(&res->spinlock); 666 667 BUG_ON(res->inflight_locks == 0); 668 669 res->inflight_locks--; 670 671 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, 672 res->lockname.len, res->lockname.name, res->inflight_locks, 673 __builtin_return_address(0)); 674 675 wake_up(&res->wq); 676 } 677 678 void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm, 679 struct dlm_lock_resource *res) 680 { 681 assert_spin_locked(&res->spinlock); 682 res->inflight_assert_workers++; 683 mlog(0, "%s:%.*s: inflight assert worker++: now %u\n", 684 dlm->name, res->lockname.len, res->lockname.name, 685 res->inflight_assert_workers); 686 } 687 688 static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm, 689 struct dlm_lock_resource *res) 690 { 691 assert_spin_locked(&res->spinlock); 692 BUG_ON(res->inflight_assert_workers == 0); 693 res->inflight_assert_workers--; 694 mlog(0, "%s:%.*s: inflight assert worker--: now %u\n", 695 dlm->name, res->lockname.len, res->lockname.name, 696 res->inflight_assert_workers); 697 } 698 699 static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm, 700 struct dlm_lock_resource *res) 701 { 702 spin_lock(&res->spinlock); 703 __dlm_lockres_drop_inflight_worker(dlm, res); 704 spin_unlock(&res->spinlock); 705 } 706 707 /* 708 * lookup a lock resource by name. 709 * may already exist in the hashtable. 710 * lockid is null terminated 711 * 712 * if not, allocate enough for the lockres and for 713 * the temporary structure used in doing the mastering. 714 * 715 * also, do a lookup in the dlm->master_list to see 716 * if another node has begun mastering the same lock. 717 * if so, there should be a block entry in there 718 * for this name, and we should *not* attempt to master 719 * the lock here. need to wait around for that node 720 * to assert_master (or die). 721 * 722 */ 723 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, 724 const char *lockid, 725 int namelen, 726 int flags) 727 { 728 struct dlm_lock_resource *tmpres=NULL, *res=NULL; 729 struct dlm_master_list_entry *mle = NULL; 730 struct dlm_master_list_entry *alloc_mle = NULL; 731 int blocked = 0; 732 int ret, nodenum; 733 struct dlm_node_iter iter; 734 unsigned int hash; 735 int tries = 0; 736 int bit, wait_on_recovery = 0; 737 738 BUG_ON(!lockid); 739 740 hash = dlm_lockid_hash(lockid, namelen); 741 742 mlog(0, "get lockres %s (len %d)\n", lockid, namelen); 743 744 lookup: 745 spin_lock(&dlm->spinlock); 746 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 747 if (tmpres) { 748 spin_unlock(&dlm->spinlock); 749 spin_lock(&tmpres->spinlock); 750 751 /* 752 * Right after dlm spinlock was released, dlm_thread could have 753 * purged the lockres. Check if lockres got unhashed. If so 754 * start over. 755 */ 756 if (hlist_unhashed(&tmpres->hash_node)) { 757 spin_unlock(&tmpres->spinlock); 758 dlm_lockres_put(tmpres); 759 tmpres = NULL; 760 goto lookup; 761 } 762 763 /* Wait on the thread that is mastering the resource */ 764 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 765 __dlm_wait_on_lockres(tmpres); 766 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); 767 spin_unlock(&tmpres->spinlock); 768 dlm_lockres_put(tmpres); 769 tmpres = NULL; 770 goto lookup; 771 } 772 773 /* Wait on the resource purge to complete before continuing */ 774 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { 775 BUG_ON(tmpres->owner == dlm->node_num); 776 __dlm_wait_on_lockres_flags(tmpres, 777 DLM_LOCK_RES_DROPPING_REF); 778 spin_unlock(&tmpres->spinlock); 779 dlm_lockres_put(tmpres); 780 tmpres = NULL; 781 goto lookup; 782 } 783 784 /* Grab inflight ref to pin the resource */ 785 dlm_lockres_grab_inflight_ref(dlm, tmpres); 786 787 spin_unlock(&tmpres->spinlock); 788 if (res) { 789 spin_lock(&dlm->track_lock); 790 if (!list_empty(&res->tracking)) 791 list_del_init(&res->tracking); 792 else 793 mlog(ML_ERROR, "Resource %.*s not " 794 "on the Tracking list\n", 795 res->lockname.len, 796 res->lockname.name); 797 spin_unlock(&dlm->track_lock); 798 dlm_lockres_put(res); 799 } 800 res = tmpres; 801 goto leave; 802 } 803 804 if (!res) { 805 spin_unlock(&dlm->spinlock); 806 mlog(0, "allocating a new resource\n"); 807 /* nothing found and we need to allocate one. */ 808 alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 809 if (!alloc_mle) 810 goto leave; 811 res = dlm_new_lockres(dlm, lockid, namelen); 812 if (!res) 813 goto leave; 814 goto lookup; 815 } 816 817 mlog(0, "no lockres found, allocated our own: %p\n", res); 818 819 if (flags & LKM_LOCAL) { 820 /* caller knows it's safe to assume it's not mastered elsewhere 821 * DONE! return right away */ 822 spin_lock(&res->spinlock); 823 dlm_change_lockres_owner(dlm, res, dlm->node_num); 824 __dlm_insert_lockres(dlm, res); 825 dlm_lockres_grab_inflight_ref(dlm, res); 826 spin_unlock(&res->spinlock); 827 spin_unlock(&dlm->spinlock); 828 /* lockres still marked IN_PROGRESS */ 829 goto wake_waiters; 830 } 831 832 /* check master list to see if another node has started mastering it */ 833 spin_lock(&dlm->master_lock); 834 835 /* if we found a block, wait for lock to be mastered by another node */ 836 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); 837 if (blocked) { 838 int mig; 839 if (mle->type == DLM_MLE_MASTER) { 840 mlog(ML_ERROR, "master entry for nonexistent lock!\n"); 841 BUG(); 842 } 843 mig = (mle->type == DLM_MLE_MIGRATION); 844 /* if there is a migration in progress, let the migration 845 * finish before continuing. we can wait for the absence 846 * of the MIGRATION mle: either the migrate finished or 847 * one of the nodes died and the mle was cleaned up. 848 * if there is a BLOCK here, but it already has a master 849 * set, we are too late. the master does not have a ref 850 * for us in the refmap. detach the mle and drop it. 851 * either way, go back to the top and start over. */ 852 if (mig || mle->master != O2NM_MAX_NODES) { 853 BUG_ON(mig && mle->master == dlm->node_num); 854 /* we arrived too late. the master does not 855 * have a ref for us. retry. */ 856 mlog(0, "%s:%.*s: late on %s\n", 857 dlm->name, namelen, lockid, 858 mig ? "MIGRATION" : "BLOCK"); 859 spin_unlock(&dlm->master_lock); 860 spin_unlock(&dlm->spinlock); 861 862 /* master is known, detach */ 863 if (!mig) 864 dlm_mle_detach_hb_events(dlm, mle); 865 dlm_put_mle(mle); 866 mle = NULL; 867 /* this is lame, but we can't wait on either 868 * the mle or lockres waitqueue here */ 869 if (mig) 870 msleep(100); 871 goto lookup; 872 } 873 } else { 874 /* go ahead and try to master lock on this node */ 875 mle = alloc_mle; 876 /* make sure this does not get freed below */ 877 alloc_mle = NULL; 878 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); 879 set_bit(dlm->node_num, mle->maybe_map); 880 __dlm_insert_mle(dlm, mle); 881 882 /* still holding the dlm spinlock, check the recovery map 883 * to see if there are any nodes that still need to be 884 * considered. these will not appear in the mle nodemap 885 * but they might own this lockres. wait on them. */ 886 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 887 if (bit < O2NM_MAX_NODES) { 888 mlog(0, "%s: res %.*s, At least one node (%d) " 889 "to recover before lock mastery can begin\n", 890 dlm->name, namelen, (char *)lockid, bit); 891 wait_on_recovery = 1; 892 } 893 } 894 895 /* at this point there is either a DLM_MLE_BLOCK or a 896 * DLM_MLE_MASTER on the master list, so it's safe to add the 897 * lockres to the hashtable. anyone who finds the lock will 898 * still have to wait on the IN_PROGRESS. */ 899 900 /* finally add the lockres to its hash bucket */ 901 __dlm_insert_lockres(dlm, res); 902 903 /* since this lockres is new it doesn't not require the spinlock */ 904 __dlm_lockres_grab_inflight_ref(dlm, res); 905 906 /* get an extra ref on the mle in case this is a BLOCK 907 * if so, the creator of the BLOCK may try to put the last 908 * ref at this time in the assert master handler, so we 909 * need an extra one to keep from a bad ptr deref. */ 910 dlm_get_mle_inuse(mle); 911 spin_unlock(&dlm->master_lock); 912 spin_unlock(&dlm->spinlock); 913 914 redo_request: 915 while (wait_on_recovery) { 916 /* any cluster changes that occurred after dropping the 917 * dlm spinlock would be detectable be a change on the mle, 918 * so we only need to clear out the recovery map once. */ 919 if (dlm_is_recovery_lock(lockid, namelen)) { 920 mlog(0, "%s: Recovery map is not empty, but must " 921 "master $RECOVERY lock now\n", dlm->name); 922 if (!dlm_pre_master_reco_lockres(dlm, res)) 923 wait_on_recovery = 0; 924 else { 925 mlog(0, "%s: waiting 500ms for heartbeat state " 926 "change\n", dlm->name); 927 msleep(500); 928 } 929 continue; 930 } 931 932 dlm_kick_recovery_thread(dlm); 933 msleep(1000); 934 dlm_wait_for_recovery(dlm); 935 936 spin_lock(&dlm->spinlock); 937 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 938 if (bit < O2NM_MAX_NODES) { 939 mlog(0, "%s: res %.*s, At least one node (%d) " 940 "to recover before lock mastery can begin\n", 941 dlm->name, namelen, (char *)lockid, bit); 942 wait_on_recovery = 1; 943 } else 944 wait_on_recovery = 0; 945 spin_unlock(&dlm->spinlock); 946 947 if (wait_on_recovery) 948 dlm_wait_for_node_recovery(dlm, bit, 10000); 949 } 950 951 /* must wait for lock to be mastered elsewhere */ 952 if (blocked) 953 goto wait; 954 955 ret = -EINVAL; 956 dlm_node_iter_init(mle->vote_map, &iter); 957 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 958 ret = dlm_do_master_request(res, mle, nodenum); 959 if (ret < 0) 960 mlog_errno(ret); 961 if (mle->master != O2NM_MAX_NODES) { 962 /* found a master ! */ 963 if (mle->master <= nodenum) 964 break; 965 /* if our master request has not reached the master 966 * yet, keep going until it does. this is how the 967 * master will know that asserts are needed back to 968 * the lower nodes. */ 969 mlog(0, "%s: res %.*s, Requests only up to %u but " 970 "master is %u, keep going\n", dlm->name, namelen, 971 lockid, nodenum, mle->master); 972 } 973 } 974 975 wait: 976 /* keep going until the response map includes all nodes */ 977 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 978 if (ret < 0) { 979 wait_on_recovery = 1; 980 mlog(0, "%s: res %.*s, Node map changed, redo the master " 981 "request now, blocked=%d\n", dlm->name, res->lockname.len, 982 res->lockname.name, blocked); 983 if (++tries > 20) { 984 mlog(ML_ERROR, "%s: res %.*s, Spinning on " 985 "dlm_wait_for_lock_mastery, blocked = %d\n", 986 dlm->name, res->lockname.len, 987 res->lockname.name, blocked); 988 dlm_print_one_lock_resource(res); 989 dlm_print_one_mle(mle); 990 tries = 0; 991 } 992 goto redo_request; 993 } 994 995 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, 996 res->lockname.name, res->owner); 997 /* make sure we never continue without this */ 998 BUG_ON(res->owner == O2NM_MAX_NODES); 999 1000 /* master is known, detach if not already detached */ 1001 dlm_mle_detach_hb_events(dlm, mle); 1002 dlm_put_mle(mle); 1003 /* put the extra ref */ 1004 dlm_put_mle_inuse(mle); 1005 1006 wake_waiters: 1007 spin_lock(&res->spinlock); 1008 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1009 spin_unlock(&res->spinlock); 1010 wake_up(&res->wq); 1011 1012 leave: 1013 /* need to free the unused mle */ 1014 if (alloc_mle) 1015 kmem_cache_free(dlm_mle_cache, alloc_mle); 1016 1017 return res; 1018 } 1019 1020 1021 #define DLM_MASTERY_TIMEOUT_MS 5000 1022 1023 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 1024 struct dlm_lock_resource *res, 1025 struct dlm_master_list_entry *mle, 1026 int *blocked) 1027 { 1028 u8 m; 1029 int ret, bit; 1030 int map_changed, voting_done; 1031 int assert, sleep; 1032 1033 recheck: 1034 ret = 0; 1035 assert = 0; 1036 1037 /* check if another node has already become the owner */ 1038 spin_lock(&res->spinlock); 1039 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1040 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, 1041 res->lockname.len, res->lockname.name, res->owner); 1042 spin_unlock(&res->spinlock); 1043 /* this will cause the master to re-assert across 1044 * the whole cluster, freeing up mles */ 1045 if (res->owner != dlm->node_num) { 1046 ret = dlm_do_master_request(res, mle, res->owner); 1047 if (ret < 0) { 1048 /* give recovery a chance to run */ 1049 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1050 msleep(500); 1051 goto recheck; 1052 } 1053 } 1054 ret = 0; 1055 goto leave; 1056 } 1057 spin_unlock(&res->spinlock); 1058 1059 spin_lock(&mle->spinlock); 1060 m = mle->master; 1061 map_changed = (memcmp(mle->vote_map, mle->node_map, 1062 sizeof(mle->vote_map)) != 0); 1063 voting_done = (memcmp(mle->vote_map, mle->response_map, 1064 sizeof(mle->vote_map)) == 0); 1065 1066 /* restart if we hit any errors */ 1067 if (map_changed) { 1068 int b; 1069 mlog(0, "%s: %.*s: node map changed, restarting\n", 1070 dlm->name, res->lockname.len, res->lockname.name); 1071 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1072 b = (mle->type == DLM_MLE_BLOCK); 1073 if ((*blocked && !b) || (!*blocked && b)) { 1074 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1075 dlm->name, res->lockname.len, res->lockname.name, 1076 *blocked, b); 1077 *blocked = b; 1078 } 1079 spin_unlock(&mle->spinlock); 1080 if (ret < 0) { 1081 mlog_errno(ret); 1082 goto leave; 1083 } 1084 mlog(0, "%s:%.*s: restart lock mastery succeeded, " 1085 "rechecking now\n", dlm->name, res->lockname.len, 1086 res->lockname.name); 1087 goto recheck; 1088 } else { 1089 if (!voting_done) { 1090 mlog(0, "map not changed and voting not done " 1091 "for %s:%.*s\n", dlm->name, res->lockname.len, 1092 res->lockname.name); 1093 } 1094 } 1095 1096 if (m != O2NM_MAX_NODES) { 1097 /* another node has done an assert! 1098 * all done! */ 1099 sleep = 0; 1100 } else { 1101 sleep = 1; 1102 /* have all nodes responded? */ 1103 if (voting_done && !*blocked) { 1104 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 1105 if (dlm->node_num <= bit) { 1106 /* my node number is lowest. 1107 * now tell other nodes that I am 1108 * mastering this. */ 1109 mle->master = dlm->node_num; 1110 /* ref was grabbed in get_lock_resource 1111 * will be dropped in dlmlock_master */ 1112 assert = 1; 1113 sleep = 0; 1114 } 1115 /* if voting is done, but we have not received 1116 * an assert master yet, we must sleep */ 1117 } 1118 } 1119 1120 spin_unlock(&mle->spinlock); 1121 1122 /* sleep if we haven't finished voting yet */ 1123 if (sleep) { 1124 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); 1125 1126 /* 1127 if (atomic_read(&mle->mle_refs.refcount) < 2) 1128 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, 1129 atomic_read(&mle->mle_refs.refcount), 1130 res->lockname.len, res->lockname.name); 1131 */ 1132 atomic_set(&mle->woken, 0); 1133 (void)wait_event_timeout(mle->wq, 1134 (atomic_read(&mle->woken) == 1), 1135 timeo); 1136 if (res->owner == O2NM_MAX_NODES) { 1137 mlog(0, "%s:%.*s: waiting again\n", dlm->name, 1138 res->lockname.len, res->lockname.name); 1139 goto recheck; 1140 } 1141 mlog(0, "done waiting, master is %u\n", res->owner); 1142 ret = 0; 1143 goto leave; 1144 } 1145 1146 ret = 0; /* done */ 1147 if (assert) { 1148 m = dlm->node_num; 1149 mlog(0, "about to master %.*s here, this=%u\n", 1150 res->lockname.len, res->lockname.name, m); 1151 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); 1152 if (ret) { 1153 /* This is a failure in the network path, 1154 * not in the response to the assert_master 1155 * (any nonzero response is a BUG on this node). 1156 * Most likely a socket just got disconnected 1157 * due to node death. */ 1158 mlog_errno(ret); 1159 } 1160 /* no longer need to restart lock mastery. 1161 * all living nodes have been contacted. */ 1162 ret = 0; 1163 } 1164 1165 /* set the lockres owner */ 1166 spin_lock(&res->spinlock); 1167 /* mastery reference obtained either during 1168 * assert_master_handler or in get_lock_resource */ 1169 dlm_change_lockres_owner(dlm, res, m); 1170 spin_unlock(&res->spinlock); 1171 1172 leave: 1173 return ret; 1174 } 1175 1176 struct dlm_bitmap_diff_iter 1177 { 1178 int curnode; 1179 unsigned long *orig_bm; 1180 unsigned long *cur_bm; 1181 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1182 }; 1183 1184 enum dlm_node_state_change 1185 { 1186 NODE_DOWN = -1, 1187 NODE_NO_CHANGE = 0, 1188 NODE_UP 1189 }; 1190 1191 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, 1192 unsigned long *orig_bm, 1193 unsigned long *cur_bm) 1194 { 1195 unsigned long p1, p2; 1196 int i; 1197 1198 iter->curnode = -1; 1199 iter->orig_bm = orig_bm; 1200 iter->cur_bm = cur_bm; 1201 1202 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { 1203 p1 = *(iter->orig_bm + i); 1204 p2 = *(iter->cur_bm + i); 1205 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); 1206 } 1207 } 1208 1209 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, 1210 enum dlm_node_state_change *state) 1211 { 1212 int bit; 1213 1214 if (iter->curnode >= O2NM_MAX_NODES) 1215 return -ENOENT; 1216 1217 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, 1218 iter->curnode+1); 1219 if (bit >= O2NM_MAX_NODES) { 1220 iter->curnode = O2NM_MAX_NODES; 1221 return -ENOENT; 1222 } 1223 1224 /* if it was there in the original then this node died */ 1225 if (test_bit(bit, iter->orig_bm)) 1226 *state = NODE_DOWN; 1227 else 1228 *state = NODE_UP; 1229 1230 iter->curnode = bit; 1231 return bit; 1232 } 1233 1234 1235 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 1236 struct dlm_lock_resource *res, 1237 struct dlm_master_list_entry *mle, 1238 int blocked) 1239 { 1240 struct dlm_bitmap_diff_iter bdi; 1241 enum dlm_node_state_change sc; 1242 int node; 1243 int ret = 0; 1244 1245 mlog(0, "something happened such that the " 1246 "master process may need to be restarted!\n"); 1247 1248 assert_spin_locked(&mle->spinlock); 1249 1250 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); 1251 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1252 while (node >= 0) { 1253 if (sc == NODE_UP) { 1254 /* a node came up. clear any old vote from 1255 * the response map and set it in the vote map 1256 * then restart the mastery. */ 1257 mlog(ML_NOTICE, "node %d up while restarting\n", node); 1258 1259 /* redo the master request, but only for the new node */ 1260 mlog(0, "sending request to new node\n"); 1261 clear_bit(node, mle->response_map); 1262 set_bit(node, mle->vote_map); 1263 } else { 1264 mlog(ML_ERROR, "node down! %d\n", node); 1265 if (blocked) { 1266 int lowest = find_next_bit(mle->maybe_map, 1267 O2NM_MAX_NODES, 0); 1268 1269 /* act like it was never there */ 1270 clear_bit(node, mle->maybe_map); 1271 1272 if (node == lowest) { 1273 mlog(0, "expected master %u died" 1274 " while this node was blocked " 1275 "waiting on it!\n", node); 1276 lowest = find_next_bit(mle->maybe_map, 1277 O2NM_MAX_NODES, 1278 lowest+1); 1279 if (lowest < O2NM_MAX_NODES) { 1280 mlog(0, "%s:%.*s:still " 1281 "blocked. waiting on %u " 1282 "now\n", dlm->name, 1283 res->lockname.len, 1284 res->lockname.name, 1285 lowest); 1286 } else { 1287 /* mle is an MLE_BLOCK, but 1288 * there is now nothing left to 1289 * block on. we need to return 1290 * all the way back out and try 1291 * again with an MLE_MASTER. 1292 * dlm_do_local_recovery_cleanup 1293 * has already run, so the mle 1294 * refcount is ok */ 1295 mlog(0, "%s:%.*s: no " 1296 "longer blocking. try to " 1297 "master this here\n", 1298 dlm->name, 1299 res->lockname.len, 1300 res->lockname.name); 1301 mle->type = DLM_MLE_MASTER; 1302 mle->mleres = res; 1303 } 1304 } 1305 } 1306 1307 /* now blank out everything, as if we had never 1308 * contacted anyone */ 1309 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 1310 memset(mle->response_map, 0, sizeof(mle->response_map)); 1311 /* reset the vote_map to the current node_map */ 1312 memcpy(mle->vote_map, mle->node_map, 1313 sizeof(mle->node_map)); 1314 /* put myself into the maybe map */ 1315 if (mle->type != DLM_MLE_BLOCK) 1316 set_bit(dlm->node_num, mle->maybe_map); 1317 } 1318 ret = -EAGAIN; 1319 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1320 } 1321 return ret; 1322 } 1323 1324 1325 /* 1326 * DLM_MASTER_REQUEST_MSG 1327 * 1328 * returns: 0 on success, 1329 * -errno on a network error 1330 * 1331 * on error, the caller should assume the target node is "dead" 1332 * 1333 */ 1334 1335 static int dlm_do_master_request(struct dlm_lock_resource *res, 1336 struct dlm_master_list_entry *mle, int to) 1337 { 1338 struct dlm_ctxt *dlm = mle->dlm; 1339 struct dlm_master_request request; 1340 int ret, response=0, resend; 1341 1342 memset(&request, 0, sizeof(request)); 1343 request.node_idx = dlm->node_num; 1344 1345 BUG_ON(mle->type == DLM_MLE_MIGRATION); 1346 1347 request.namelen = (u8)mle->mnamelen; 1348 memcpy(request.name, mle->mname, request.namelen); 1349 1350 again: 1351 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, 1352 sizeof(request), to, &response); 1353 if (ret < 0) { 1354 if (ret == -ESRCH) { 1355 /* should never happen */ 1356 mlog(ML_ERROR, "TCP stack not ready!\n"); 1357 BUG(); 1358 } else if (ret == -EINVAL) { 1359 mlog(ML_ERROR, "bad args passed to o2net!\n"); 1360 BUG(); 1361 } else if (ret == -ENOMEM) { 1362 mlog(ML_ERROR, "out of memory while trying to send " 1363 "network message! retrying\n"); 1364 /* this is totally crude */ 1365 msleep(50); 1366 goto again; 1367 } else if (!dlm_is_host_down(ret)) { 1368 /* not a network error. bad. */ 1369 mlog_errno(ret); 1370 mlog(ML_ERROR, "unhandled error!"); 1371 BUG(); 1372 } 1373 /* all other errors should be network errors, 1374 * and likely indicate node death */ 1375 mlog(ML_ERROR, "link to %d went down!\n", to); 1376 goto out; 1377 } 1378 1379 ret = 0; 1380 resend = 0; 1381 spin_lock(&mle->spinlock); 1382 switch (response) { 1383 case DLM_MASTER_RESP_YES: 1384 set_bit(to, mle->response_map); 1385 mlog(0, "node %u is the master, response=YES\n", to); 1386 mlog(0, "%s:%.*s: master node %u now knows I have a " 1387 "reference\n", dlm->name, res->lockname.len, 1388 res->lockname.name, to); 1389 mle->master = to; 1390 break; 1391 case DLM_MASTER_RESP_NO: 1392 mlog(0, "node %u not master, response=NO\n", to); 1393 set_bit(to, mle->response_map); 1394 break; 1395 case DLM_MASTER_RESP_MAYBE: 1396 mlog(0, "node %u not master, response=MAYBE\n", to); 1397 set_bit(to, mle->response_map); 1398 set_bit(to, mle->maybe_map); 1399 break; 1400 case DLM_MASTER_RESP_ERROR: 1401 mlog(0, "node %u hit an error, resending\n", to); 1402 resend = 1; 1403 response = 0; 1404 break; 1405 default: 1406 mlog(ML_ERROR, "bad response! %u\n", response); 1407 BUG(); 1408 } 1409 spin_unlock(&mle->spinlock); 1410 if (resend) { 1411 /* this is also totally crude */ 1412 msleep(50); 1413 goto again; 1414 } 1415 1416 out: 1417 return ret; 1418 } 1419 1420 /* 1421 * locks that can be taken here: 1422 * dlm->spinlock 1423 * res->spinlock 1424 * mle->spinlock 1425 * dlm->master_list 1426 * 1427 * if possible, TRIM THIS DOWN!!! 1428 */ 1429 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, 1430 void **ret_data) 1431 { 1432 u8 response = DLM_MASTER_RESP_MAYBE; 1433 struct dlm_ctxt *dlm = data; 1434 struct dlm_lock_resource *res = NULL; 1435 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1436 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1437 char *name; 1438 unsigned int namelen, hash; 1439 int found, ret; 1440 int set_maybe; 1441 int dispatch_assert = 0; 1442 int dispatched = 0; 1443 1444 if (!dlm_grab(dlm)) 1445 return DLM_MASTER_RESP_NO; 1446 1447 if (!dlm_domain_fully_joined(dlm)) { 1448 response = DLM_MASTER_RESP_NO; 1449 goto send_response; 1450 } 1451 1452 name = request->name; 1453 namelen = request->namelen; 1454 hash = dlm_lockid_hash(name, namelen); 1455 1456 if (namelen > DLM_LOCKID_NAME_MAX) { 1457 response = DLM_IVBUFLEN; 1458 goto send_response; 1459 } 1460 1461 way_up_top: 1462 spin_lock(&dlm->spinlock); 1463 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1464 if (res) { 1465 spin_unlock(&dlm->spinlock); 1466 1467 /* take care of the easy cases up front */ 1468 spin_lock(&res->spinlock); 1469 1470 /* 1471 * Right after dlm spinlock was released, dlm_thread could have 1472 * purged the lockres. Check if lockres got unhashed. If so 1473 * start over. 1474 */ 1475 if (hlist_unhashed(&res->hash_node)) { 1476 spin_unlock(&res->spinlock); 1477 dlm_lockres_put(res); 1478 goto way_up_top; 1479 } 1480 1481 if (res->state & (DLM_LOCK_RES_RECOVERING| 1482 DLM_LOCK_RES_MIGRATING)) { 1483 spin_unlock(&res->spinlock); 1484 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1485 "being recovered/migrated\n"); 1486 response = DLM_MASTER_RESP_ERROR; 1487 if (mle) 1488 kmem_cache_free(dlm_mle_cache, mle); 1489 goto send_response; 1490 } 1491 1492 if (res->owner == dlm->node_num) { 1493 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); 1494 spin_unlock(&res->spinlock); 1495 response = DLM_MASTER_RESP_YES; 1496 if (mle) 1497 kmem_cache_free(dlm_mle_cache, mle); 1498 1499 /* this node is the owner. 1500 * there is some extra work that needs to 1501 * happen now. the requesting node has 1502 * caused all nodes up to this one to 1503 * create mles. this node now needs to 1504 * go back and clean those up. */ 1505 dispatch_assert = 1; 1506 goto send_response; 1507 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1508 spin_unlock(&res->spinlock); 1509 // mlog(0, "node %u is the master\n", res->owner); 1510 response = DLM_MASTER_RESP_NO; 1511 if (mle) 1512 kmem_cache_free(dlm_mle_cache, mle); 1513 goto send_response; 1514 } 1515 1516 /* ok, there is no owner. either this node is 1517 * being blocked, or it is actively trying to 1518 * master this lock. */ 1519 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1520 mlog(ML_ERROR, "lock with no owner should be " 1521 "in-progress!\n"); 1522 BUG(); 1523 } 1524 1525 // mlog(0, "lockres is in progress...\n"); 1526 spin_lock(&dlm->master_lock); 1527 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1528 if (!found) { 1529 mlog(ML_ERROR, "no mle found for this lock!\n"); 1530 BUG(); 1531 } 1532 set_maybe = 1; 1533 spin_lock(&tmpmle->spinlock); 1534 if (tmpmle->type == DLM_MLE_BLOCK) { 1535 // mlog(0, "this node is waiting for " 1536 // "lockres to be mastered\n"); 1537 response = DLM_MASTER_RESP_NO; 1538 } else if (tmpmle->type == DLM_MLE_MIGRATION) { 1539 mlog(0, "node %u is master, but trying to migrate to " 1540 "node %u.\n", tmpmle->master, tmpmle->new_master); 1541 if (tmpmle->master == dlm->node_num) { 1542 mlog(ML_ERROR, "no owner on lockres, but this " 1543 "node is trying to migrate it to %u?!\n", 1544 tmpmle->new_master); 1545 BUG(); 1546 } else { 1547 /* the real master can respond on its own */ 1548 response = DLM_MASTER_RESP_NO; 1549 } 1550 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1551 set_maybe = 0; 1552 if (tmpmle->master == dlm->node_num) { 1553 response = DLM_MASTER_RESP_YES; 1554 /* this node will be the owner. 1555 * go back and clean the mles on any 1556 * other nodes */ 1557 dispatch_assert = 1; 1558 dlm_lockres_set_refmap_bit(dlm, res, 1559 request->node_idx); 1560 } else 1561 response = DLM_MASTER_RESP_NO; 1562 } else { 1563 // mlog(0, "this node is attempting to " 1564 // "master lockres\n"); 1565 response = DLM_MASTER_RESP_MAYBE; 1566 } 1567 if (set_maybe) 1568 set_bit(request->node_idx, tmpmle->maybe_map); 1569 spin_unlock(&tmpmle->spinlock); 1570 1571 spin_unlock(&dlm->master_lock); 1572 spin_unlock(&res->spinlock); 1573 1574 /* keep the mle attached to heartbeat events */ 1575 dlm_put_mle(tmpmle); 1576 if (mle) 1577 kmem_cache_free(dlm_mle_cache, mle); 1578 goto send_response; 1579 } 1580 1581 /* 1582 * lockres doesn't exist on this node 1583 * if there is an MLE_BLOCK, return NO 1584 * if there is an MLE_MASTER, return MAYBE 1585 * otherwise, add an MLE_BLOCK, return NO 1586 */ 1587 spin_lock(&dlm->master_lock); 1588 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1589 if (!found) { 1590 /* this lockid has never been seen on this node yet */ 1591 // mlog(0, "no mle found\n"); 1592 if (!mle) { 1593 spin_unlock(&dlm->master_lock); 1594 spin_unlock(&dlm->spinlock); 1595 1596 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 1597 if (!mle) { 1598 response = DLM_MASTER_RESP_ERROR; 1599 mlog_errno(-ENOMEM); 1600 goto send_response; 1601 } 1602 goto way_up_top; 1603 } 1604 1605 // mlog(0, "this is second time thru, already allocated, " 1606 // "add the block.\n"); 1607 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); 1608 set_bit(request->node_idx, mle->maybe_map); 1609 __dlm_insert_mle(dlm, mle); 1610 response = DLM_MASTER_RESP_NO; 1611 } else { 1612 // mlog(0, "mle was found\n"); 1613 set_maybe = 1; 1614 spin_lock(&tmpmle->spinlock); 1615 if (tmpmle->master == dlm->node_num) { 1616 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); 1617 BUG(); 1618 } 1619 if (tmpmle->type == DLM_MLE_BLOCK) 1620 response = DLM_MASTER_RESP_NO; 1621 else if (tmpmle->type == DLM_MLE_MIGRATION) { 1622 mlog(0, "migration mle was found (%u->%u)\n", 1623 tmpmle->master, tmpmle->new_master); 1624 /* real master can respond on its own */ 1625 response = DLM_MASTER_RESP_NO; 1626 } else 1627 response = DLM_MASTER_RESP_MAYBE; 1628 if (set_maybe) 1629 set_bit(request->node_idx, tmpmle->maybe_map); 1630 spin_unlock(&tmpmle->spinlock); 1631 } 1632 spin_unlock(&dlm->master_lock); 1633 spin_unlock(&dlm->spinlock); 1634 1635 if (found) { 1636 /* keep the mle attached to heartbeat events */ 1637 dlm_put_mle(tmpmle); 1638 } 1639 send_response: 1640 /* 1641 * __dlm_lookup_lockres() grabbed a reference to this lockres. 1642 * The reference is released by dlm_assert_master_worker() under 1643 * the call to dlm_dispatch_assert_master(). If 1644 * dlm_assert_master_worker() isn't called, we drop it here. 1645 */ 1646 if (dispatch_assert) { 1647 if (response != DLM_MASTER_RESP_YES) 1648 mlog(ML_ERROR, "invalid response %d\n", response); 1649 if (!res) { 1650 mlog(ML_ERROR, "bad lockres while trying to assert!\n"); 1651 BUG(); 1652 } 1653 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1654 dlm->node_num, res->lockname.len, res->lockname.name); 1655 spin_lock(&res->spinlock); 1656 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1657 DLM_ASSERT_MASTER_MLE_CLEANUP); 1658 if (ret < 0) { 1659 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1660 response = DLM_MASTER_RESP_ERROR; 1661 spin_unlock(&res->spinlock); 1662 dlm_lockres_put(res); 1663 } else { 1664 dispatched = 1; 1665 __dlm_lockres_grab_inflight_worker(dlm, res); 1666 spin_unlock(&res->spinlock); 1667 } 1668 } else { 1669 if (res) 1670 dlm_lockres_put(res); 1671 } 1672 1673 if (!dispatched) 1674 dlm_put(dlm); 1675 return response; 1676 } 1677 1678 /* 1679 * DLM_ASSERT_MASTER_MSG 1680 */ 1681 1682 1683 /* 1684 * NOTE: this can be used for debugging 1685 * can periodically run all locks owned by this node 1686 * and re-assert across the cluster... 1687 */ 1688 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 1689 struct dlm_lock_resource *res, 1690 void *nodemap, u32 flags) 1691 { 1692 struct dlm_assert_master assert; 1693 int to, tmpret; 1694 struct dlm_node_iter iter; 1695 int ret = 0; 1696 int reassert; 1697 const char *lockname = res->lockname.name; 1698 unsigned int namelen = res->lockname.len; 1699 1700 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 1701 1702 spin_lock(&res->spinlock); 1703 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1704 spin_unlock(&res->spinlock); 1705 1706 again: 1707 reassert = 0; 1708 1709 /* note that if this nodemap is empty, it returns 0 */ 1710 dlm_node_iter_init(nodemap, &iter); 1711 while ((to = dlm_node_iter_next(&iter)) >= 0) { 1712 int r = 0; 1713 struct dlm_master_list_entry *mle = NULL; 1714 1715 mlog(0, "sending assert master to %d (%.*s)\n", to, 1716 namelen, lockname); 1717 memset(&assert, 0, sizeof(assert)); 1718 assert.node_idx = dlm->node_num; 1719 assert.namelen = namelen; 1720 memcpy(assert.name, lockname, namelen); 1721 assert.flags = cpu_to_be32(flags); 1722 1723 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 1724 &assert, sizeof(assert), to, &r); 1725 if (tmpret < 0) { 1726 mlog(ML_ERROR, "Error %d when sending message %u (key " 1727 "0x%x) to node %u\n", tmpret, 1728 DLM_ASSERT_MASTER_MSG, dlm->key, to); 1729 if (!dlm_is_host_down(tmpret)) { 1730 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); 1731 BUG(); 1732 } 1733 /* a node died. finish out the rest of the nodes. */ 1734 mlog(0, "link to %d went down!\n", to); 1735 /* any nonzero status return will do */ 1736 ret = tmpret; 1737 r = 0; 1738 } else if (r < 0) { 1739 /* ok, something horribly messed. kill thyself. */ 1740 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1741 "got %d.\n", namelen, lockname, to, r); 1742 spin_lock(&dlm->spinlock); 1743 spin_lock(&dlm->master_lock); 1744 if (dlm_find_mle(dlm, &mle, (char *)lockname, 1745 namelen)) { 1746 dlm_print_one_mle(mle); 1747 __dlm_put_mle(mle); 1748 } 1749 spin_unlock(&dlm->master_lock); 1750 spin_unlock(&dlm->spinlock); 1751 BUG(); 1752 } 1753 1754 if (r & DLM_ASSERT_RESPONSE_REASSERT && 1755 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { 1756 mlog(ML_ERROR, "%.*s: very strange, " 1757 "master MLE but no lockres on %u\n", 1758 namelen, lockname, to); 1759 } 1760 1761 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1762 mlog(0, "%.*s: node %u create mles on other " 1763 "nodes and requests a re-assert\n", 1764 namelen, lockname, to); 1765 reassert = 1; 1766 } 1767 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { 1768 mlog(0, "%.*s: node %u has a reference to this " 1769 "lockres, set the bit in the refmap\n", 1770 namelen, lockname, to); 1771 spin_lock(&res->spinlock); 1772 dlm_lockres_set_refmap_bit(dlm, res, to); 1773 spin_unlock(&res->spinlock); 1774 } 1775 } 1776 1777 if (reassert) 1778 goto again; 1779 1780 spin_lock(&res->spinlock); 1781 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 1782 spin_unlock(&res->spinlock); 1783 wake_up(&res->wq); 1784 1785 return ret; 1786 } 1787 1788 /* 1789 * locks that can be taken here: 1790 * dlm->spinlock 1791 * res->spinlock 1792 * mle->spinlock 1793 * dlm->master_list 1794 * 1795 * if possible, TRIM THIS DOWN!!! 1796 */ 1797 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, 1798 void **ret_data) 1799 { 1800 struct dlm_ctxt *dlm = data; 1801 struct dlm_master_list_entry *mle = NULL; 1802 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; 1803 struct dlm_lock_resource *res = NULL; 1804 char *name; 1805 unsigned int namelen, hash; 1806 u32 flags; 1807 int master_request = 0, have_lockres_ref = 0; 1808 int ret = 0; 1809 1810 if (!dlm_grab(dlm)) 1811 return 0; 1812 1813 name = assert->name; 1814 namelen = assert->namelen; 1815 hash = dlm_lockid_hash(name, namelen); 1816 flags = be32_to_cpu(assert->flags); 1817 1818 if (namelen > DLM_LOCKID_NAME_MAX) { 1819 mlog(ML_ERROR, "Invalid name length!"); 1820 goto done; 1821 } 1822 1823 spin_lock(&dlm->spinlock); 1824 1825 if (flags) 1826 mlog(0, "assert_master with flags: %u\n", flags); 1827 1828 /* find the MLE */ 1829 spin_lock(&dlm->master_lock); 1830 if (!dlm_find_mle(dlm, &mle, name, namelen)) { 1831 /* not an error, could be master just re-asserting */ 1832 mlog(0, "just got an assert_master from %u, but no " 1833 "MLE for it! (%.*s)\n", assert->node_idx, 1834 namelen, name); 1835 } else { 1836 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); 1837 if (bit >= O2NM_MAX_NODES) { 1838 /* not necessarily an error, though less likely. 1839 * could be master just re-asserting. */ 1840 mlog(0, "no bits set in the maybe_map, but %u " 1841 "is asserting! (%.*s)\n", assert->node_idx, 1842 namelen, name); 1843 } else if (bit != assert->node_idx) { 1844 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1845 mlog(0, "master %u was found, %u should " 1846 "back off\n", assert->node_idx, bit); 1847 } else { 1848 /* with the fix for bug 569, a higher node 1849 * number winning the mastery will respond 1850 * YES to mastery requests, but this node 1851 * had no way of knowing. let it pass. */ 1852 mlog(0, "%u is the lowest node, " 1853 "%u is asserting. (%.*s) %u must " 1854 "have begun after %u won.\n", bit, 1855 assert->node_idx, namelen, name, bit, 1856 assert->node_idx); 1857 } 1858 } 1859 if (mle->type == DLM_MLE_MIGRATION) { 1860 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1861 mlog(0, "%s:%.*s: got cleanup assert" 1862 " from %u for migration\n", 1863 dlm->name, namelen, name, 1864 assert->node_idx); 1865 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { 1866 mlog(0, "%s:%.*s: got unrelated assert" 1867 " from %u for migration, ignoring\n", 1868 dlm->name, namelen, name, 1869 assert->node_idx); 1870 __dlm_put_mle(mle); 1871 spin_unlock(&dlm->master_lock); 1872 spin_unlock(&dlm->spinlock); 1873 goto done; 1874 } 1875 } 1876 } 1877 spin_unlock(&dlm->master_lock); 1878 1879 /* ok everything checks out with the MLE 1880 * now check to see if there is a lockres */ 1881 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1882 if (res) { 1883 spin_lock(&res->spinlock); 1884 if (res->state & DLM_LOCK_RES_RECOVERING) { 1885 mlog(ML_ERROR, "%u asserting but %.*s is " 1886 "RECOVERING!\n", assert->node_idx, namelen, name); 1887 goto kill; 1888 } 1889 if (!mle) { 1890 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && 1891 res->owner != assert->node_idx) { 1892 mlog(ML_ERROR, "DIE! Mastery assert from %u, " 1893 "but current owner is %u! (%.*s)\n", 1894 assert->node_idx, res->owner, namelen, 1895 name); 1896 __dlm_print_one_lock_resource(res); 1897 BUG(); 1898 } 1899 } else if (mle->type != DLM_MLE_MIGRATION) { 1900 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1901 /* owner is just re-asserting */ 1902 if (res->owner == assert->node_idx) { 1903 mlog(0, "owner %u re-asserting on " 1904 "lock %.*s\n", assert->node_idx, 1905 namelen, name); 1906 goto ok; 1907 } 1908 mlog(ML_ERROR, "got assert_master from " 1909 "node %u, but %u is the owner! " 1910 "(%.*s)\n", assert->node_idx, 1911 res->owner, namelen, name); 1912 goto kill; 1913 } 1914 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1915 mlog(ML_ERROR, "got assert from %u, but lock " 1916 "with no owner should be " 1917 "in-progress! (%.*s)\n", 1918 assert->node_idx, 1919 namelen, name); 1920 goto kill; 1921 } 1922 } else /* mle->type == DLM_MLE_MIGRATION */ { 1923 /* should only be getting an assert from new master */ 1924 if (assert->node_idx != mle->new_master) { 1925 mlog(ML_ERROR, "got assert from %u, but " 1926 "new master is %u, and old master " 1927 "was %u (%.*s)\n", 1928 assert->node_idx, mle->new_master, 1929 mle->master, namelen, name); 1930 goto kill; 1931 } 1932 1933 } 1934 ok: 1935 spin_unlock(&res->spinlock); 1936 } 1937 1938 // mlog(0, "woo! got an assert_master from node %u!\n", 1939 // assert->node_idx); 1940 if (mle) { 1941 int extra_ref = 0; 1942 int nn = -1; 1943 int rr, err = 0; 1944 1945 spin_lock(&mle->spinlock); 1946 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1947 extra_ref = 1; 1948 else { 1949 /* MASTER mle: if any bits set in the response map 1950 * then the calling node needs to re-assert to clear 1951 * up nodes that this node contacted */ 1952 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1953 nn+1)) < O2NM_MAX_NODES) { 1954 if (nn != dlm->node_num && nn != assert->node_idx) { 1955 master_request = 1; 1956 break; 1957 } 1958 } 1959 } 1960 mle->master = assert->node_idx; 1961 atomic_set(&mle->woken, 1); 1962 wake_up(&mle->wq); 1963 spin_unlock(&mle->spinlock); 1964 1965 if (res) { 1966 int wake = 0; 1967 spin_lock(&res->spinlock); 1968 if (mle->type == DLM_MLE_MIGRATION) { 1969 mlog(0, "finishing off migration of lockres %.*s, " 1970 "from %u to %u\n", 1971 res->lockname.len, res->lockname.name, 1972 dlm->node_num, mle->new_master); 1973 res->state &= ~DLM_LOCK_RES_MIGRATING; 1974 wake = 1; 1975 dlm_change_lockres_owner(dlm, res, mle->new_master); 1976 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1977 } else { 1978 dlm_change_lockres_owner(dlm, res, mle->master); 1979 } 1980 spin_unlock(&res->spinlock); 1981 have_lockres_ref = 1; 1982 if (wake) 1983 wake_up(&res->wq); 1984 } 1985 1986 /* master is known, detach if not already detached. 1987 * ensures that only one assert_master call will happen 1988 * on this mle. */ 1989 spin_lock(&dlm->master_lock); 1990 1991 rr = atomic_read(&mle->mle_refs.refcount); 1992 if (mle->inuse > 0) { 1993 if (extra_ref && rr < 3) 1994 err = 1; 1995 else if (!extra_ref && rr < 2) 1996 err = 1; 1997 } else { 1998 if (extra_ref && rr < 2) 1999 err = 1; 2000 else if (!extra_ref && rr < 1) 2001 err = 1; 2002 } 2003 if (err) { 2004 mlog(ML_ERROR, "%s:%.*s: got assert master from %u " 2005 "that will mess up this node, refs=%d, extra=%d, " 2006 "inuse=%d\n", dlm->name, namelen, name, 2007 assert->node_idx, rr, extra_ref, mle->inuse); 2008 dlm_print_one_mle(mle); 2009 } 2010 __dlm_unlink_mle(dlm, mle); 2011 __dlm_mle_detach_hb_events(dlm, mle); 2012 __dlm_put_mle(mle); 2013 if (extra_ref) { 2014 /* the assert master message now balances the extra 2015 * ref given by the master / migration request message. 2016 * if this is the last put, it will be removed 2017 * from the list. */ 2018 __dlm_put_mle(mle); 2019 } 2020 spin_unlock(&dlm->master_lock); 2021 } else if (res) { 2022 if (res->owner != assert->node_idx) { 2023 mlog(0, "assert_master from %u, but current " 2024 "owner is %u (%.*s), no mle\n", assert->node_idx, 2025 res->owner, namelen, name); 2026 } 2027 } 2028 spin_unlock(&dlm->spinlock); 2029 2030 done: 2031 ret = 0; 2032 if (res) { 2033 spin_lock(&res->spinlock); 2034 res->state |= DLM_LOCK_RES_SETREF_INPROG; 2035 spin_unlock(&res->spinlock); 2036 *ret_data = (void *)res; 2037 } 2038 dlm_put(dlm); 2039 if (master_request) { 2040 mlog(0, "need to tell master to reassert\n"); 2041 /* positive. negative would shoot down the node. */ 2042 ret |= DLM_ASSERT_RESPONSE_REASSERT; 2043 if (!have_lockres_ref) { 2044 mlog(ML_ERROR, "strange, got assert from %u, MASTER " 2045 "mle present here for %s:%.*s, but no lockres!\n", 2046 assert->node_idx, dlm->name, namelen, name); 2047 } 2048 } 2049 if (have_lockres_ref) { 2050 /* let the master know we have a reference to the lockres */ 2051 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; 2052 mlog(0, "%s:%.*s: got assert from %u, need a ref\n", 2053 dlm->name, namelen, name, assert->node_idx); 2054 } 2055 return ret; 2056 2057 kill: 2058 /* kill the caller! */ 2059 mlog(ML_ERROR, "Bad message received from another node. Dumping state " 2060 "and killing the other node now! This node is OK and can continue.\n"); 2061 __dlm_print_one_lock_resource(res); 2062 spin_unlock(&res->spinlock); 2063 spin_lock(&dlm->master_lock); 2064 if (mle) 2065 __dlm_put_mle(mle); 2066 spin_unlock(&dlm->master_lock); 2067 spin_unlock(&dlm->spinlock); 2068 *ret_data = (void *)res; 2069 dlm_put(dlm); 2070 return -EINVAL; 2071 } 2072 2073 void dlm_assert_master_post_handler(int status, void *data, void *ret_data) 2074 { 2075 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; 2076 2077 if (ret_data) { 2078 spin_lock(&res->spinlock); 2079 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 2080 spin_unlock(&res->spinlock); 2081 wake_up(&res->wq); 2082 dlm_lockres_put(res); 2083 } 2084 return; 2085 } 2086 2087 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 2088 struct dlm_lock_resource *res, 2089 int ignore_higher, u8 request_from, u32 flags) 2090 { 2091 struct dlm_work_item *item; 2092 item = kzalloc(sizeof(*item), GFP_ATOMIC); 2093 if (!item) 2094 return -ENOMEM; 2095 2096 2097 /* queue up work for dlm_assert_master_worker */ 2098 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2099 item->u.am.lockres = res; /* already have a ref */ 2100 /* can optionally ignore node numbers higher than this node */ 2101 item->u.am.ignore_higher = ignore_higher; 2102 item->u.am.request_from = request_from; 2103 item->u.am.flags = flags; 2104 2105 if (ignore_higher) 2106 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2107 res->lockname.name); 2108 2109 spin_lock(&dlm->work_lock); 2110 list_add_tail(&item->list, &dlm->work_list); 2111 spin_unlock(&dlm->work_lock); 2112 2113 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2114 return 0; 2115 } 2116 2117 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) 2118 { 2119 struct dlm_ctxt *dlm = data; 2120 int ret = 0; 2121 struct dlm_lock_resource *res; 2122 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2123 int ignore_higher; 2124 int bit; 2125 u8 request_from; 2126 u32 flags; 2127 2128 dlm = item->dlm; 2129 res = item->u.am.lockres; 2130 ignore_higher = item->u.am.ignore_higher; 2131 request_from = item->u.am.request_from; 2132 flags = item->u.am.flags; 2133 2134 spin_lock(&dlm->spinlock); 2135 memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); 2136 spin_unlock(&dlm->spinlock); 2137 2138 clear_bit(dlm->node_num, nodemap); 2139 if (ignore_higher) { 2140 /* if is this just to clear up mles for nodes below 2141 * this node, do not send the message to the original 2142 * caller or any node number higher than this */ 2143 clear_bit(request_from, nodemap); 2144 bit = dlm->node_num; 2145 while (1) { 2146 bit = find_next_bit(nodemap, O2NM_MAX_NODES, 2147 bit+1); 2148 if (bit >= O2NM_MAX_NODES) 2149 break; 2150 clear_bit(bit, nodemap); 2151 } 2152 } 2153 2154 /* 2155 * If we're migrating this lock to someone else, we are no 2156 * longer allowed to assert out own mastery. OTOH, we need to 2157 * prevent migration from starting while we're still asserting 2158 * our dominance. The reserved ast delays migration. 2159 */ 2160 spin_lock(&res->spinlock); 2161 if (res->state & DLM_LOCK_RES_MIGRATING) { 2162 mlog(0, "Someone asked us to assert mastery, but we're " 2163 "in the middle of migration. Skipping assert, " 2164 "the new master will handle that.\n"); 2165 spin_unlock(&res->spinlock); 2166 goto put; 2167 } else 2168 __dlm_lockres_reserve_ast(res); 2169 spin_unlock(&res->spinlock); 2170 2171 /* this call now finishes out the nodemap 2172 * even if one or more nodes die */ 2173 mlog(0, "worker about to master %.*s here, this=%u\n", 2174 res->lockname.len, res->lockname.name, dlm->node_num); 2175 ret = dlm_do_assert_master(dlm, res, nodemap, flags); 2176 if (ret < 0) { 2177 /* no need to restart, we are done */ 2178 if (!dlm_is_host_down(ret)) 2179 mlog_errno(ret); 2180 } 2181 2182 /* Ok, we've asserted ourselves. Let's let migration start. */ 2183 dlm_lockres_release_ast(dlm, res); 2184 2185 put: 2186 dlm_lockres_drop_inflight_worker(dlm, res); 2187 2188 dlm_lockres_put(res); 2189 2190 mlog(0, "finished with dlm_assert_master_worker\n"); 2191 } 2192 2193 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. 2194 * We cannot wait for node recovery to complete to begin mastering this 2195 * lockres because this lockres is used to kick off recovery! ;-) 2196 * So, do a pre-check on all living nodes to see if any of those nodes 2197 * think that $RECOVERY is currently mastered by a dead node. If so, 2198 * we wait a short time to allow that node to get notified by its own 2199 * heartbeat stack, then check again. All $RECOVERY lock resources 2200 * mastered by dead nodes are purged when the hearbeat callback is 2201 * fired, so we can know for sure that it is safe to continue once 2202 * the node returns a live node or no node. */ 2203 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2204 struct dlm_lock_resource *res) 2205 { 2206 struct dlm_node_iter iter; 2207 int nodenum; 2208 int ret = 0; 2209 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; 2210 2211 spin_lock(&dlm->spinlock); 2212 dlm_node_iter_init(dlm->domain_map, &iter); 2213 spin_unlock(&dlm->spinlock); 2214 2215 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2216 /* do not send to self */ 2217 if (nodenum == dlm->node_num) 2218 continue; 2219 ret = dlm_do_master_requery(dlm, res, nodenum, &master); 2220 if (ret < 0) { 2221 mlog_errno(ret); 2222 if (!dlm_is_host_down(ret)) 2223 BUG(); 2224 /* host is down, so answer for that node would be 2225 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 2226 ret = 0; 2227 } 2228 2229 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { 2230 /* check to see if this master is in the recovery map */ 2231 spin_lock(&dlm->spinlock); 2232 if (test_bit(master, dlm->recovery_map)) { 2233 mlog(ML_NOTICE, "%s: node %u has not seen " 2234 "node %u go down yet, and thinks the " 2235 "dead node is mastering the recovery " 2236 "lock. must wait.\n", dlm->name, 2237 nodenum, master); 2238 ret = -EAGAIN; 2239 } 2240 spin_unlock(&dlm->spinlock); 2241 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2242 master); 2243 break; 2244 } 2245 } 2246 return ret; 2247 } 2248 2249 /* 2250 * DLM_DEREF_LOCKRES_MSG 2251 */ 2252 2253 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2254 { 2255 struct dlm_deref_lockres deref; 2256 int ret = 0, r; 2257 const char *lockname; 2258 unsigned int namelen; 2259 2260 lockname = res->lockname.name; 2261 namelen = res->lockname.len; 2262 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2263 2264 memset(&deref, 0, sizeof(deref)); 2265 deref.node_idx = dlm->node_num; 2266 deref.namelen = namelen; 2267 memcpy(deref.name, lockname, namelen); 2268 2269 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2270 &deref, sizeof(deref), res->owner, &r); 2271 if (ret < 0) 2272 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", 2273 dlm->name, namelen, lockname, ret, res->owner); 2274 else if (r < 0) { 2275 /* BAD. other node says I did not have a ref. */ 2276 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", 2277 dlm->name, namelen, lockname, res->owner, r); 2278 dlm_print_one_lock_resource(res); 2279 BUG(); 2280 } 2281 return ret; 2282 } 2283 2284 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 2285 void **ret_data) 2286 { 2287 struct dlm_ctxt *dlm = data; 2288 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; 2289 struct dlm_lock_resource *res = NULL; 2290 char *name; 2291 unsigned int namelen; 2292 int ret = -EINVAL; 2293 u8 node; 2294 unsigned int hash; 2295 struct dlm_work_item *item; 2296 int cleared = 0; 2297 int dispatch = 0; 2298 2299 if (!dlm_grab(dlm)) 2300 return 0; 2301 2302 name = deref->name; 2303 namelen = deref->namelen; 2304 node = deref->node_idx; 2305 2306 if (namelen > DLM_LOCKID_NAME_MAX) { 2307 mlog(ML_ERROR, "Invalid name length!"); 2308 goto done; 2309 } 2310 if (deref->node_idx >= O2NM_MAX_NODES) { 2311 mlog(ML_ERROR, "Invalid node number: %u\n", node); 2312 goto done; 2313 } 2314 2315 hash = dlm_lockid_hash(name, namelen); 2316 2317 spin_lock(&dlm->spinlock); 2318 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); 2319 if (!res) { 2320 spin_unlock(&dlm->spinlock); 2321 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", 2322 dlm->name, namelen, name); 2323 goto done; 2324 } 2325 spin_unlock(&dlm->spinlock); 2326 2327 spin_lock(&res->spinlock); 2328 if (res->state & DLM_LOCK_RES_SETREF_INPROG) 2329 dispatch = 1; 2330 else { 2331 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2332 if (test_bit(node, res->refmap)) { 2333 dlm_lockres_clear_refmap_bit(dlm, res, node); 2334 cleared = 1; 2335 } 2336 } 2337 spin_unlock(&res->spinlock); 2338 2339 if (!dispatch) { 2340 if (cleared) 2341 dlm_lockres_calc_usage(dlm, res); 2342 else { 2343 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2344 "but it is already dropped!\n", dlm->name, 2345 res->lockname.len, res->lockname.name, node); 2346 dlm_print_one_lock_resource(res); 2347 } 2348 ret = 0; 2349 goto done; 2350 } 2351 2352 item = kzalloc(sizeof(*item), GFP_NOFS); 2353 if (!item) { 2354 ret = -ENOMEM; 2355 mlog_errno(ret); 2356 goto done; 2357 } 2358 2359 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); 2360 item->u.dl.deref_res = res; 2361 item->u.dl.deref_node = node; 2362 2363 spin_lock(&dlm->work_lock); 2364 list_add_tail(&item->list, &dlm->work_list); 2365 spin_unlock(&dlm->work_lock); 2366 2367 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2368 return 0; 2369 2370 done: 2371 if (res) 2372 dlm_lockres_put(res); 2373 dlm_put(dlm); 2374 2375 return ret; 2376 } 2377 2378 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) 2379 { 2380 struct dlm_ctxt *dlm; 2381 struct dlm_lock_resource *res; 2382 u8 node; 2383 u8 cleared = 0; 2384 2385 dlm = item->dlm; 2386 res = item->u.dl.deref_res; 2387 node = item->u.dl.deref_node; 2388 2389 spin_lock(&res->spinlock); 2390 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2391 if (test_bit(node, res->refmap)) { 2392 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2393 dlm_lockres_clear_refmap_bit(dlm, res, node); 2394 cleared = 1; 2395 } 2396 spin_unlock(&res->spinlock); 2397 2398 if (cleared) { 2399 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", 2400 dlm->name, res->lockname.len, res->lockname.name, node); 2401 dlm_lockres_calc_usage(dlm, res); 2402 } else { 2403 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2404 "but it is already dropped!\n", dlm->name, 2405 res->lockname.len, res->lockname.name, node); 2406 dlm_print_one_lock_resource(res); 2407 } 2408 2409 dlm_lockres_put(res); 2410 } 2411 2412 /* 2413 * A migrateable resource is one that is : 2414 * 1. locally mastered, and, 2415 * 2. zero local locks, and, 2416 * 3. one or more non-local locks, or, one or more references 2417 * Returns 1 if yes, 0 if not. 2418 */ 2419 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, 2420 struct dlm_lock_resource *res) 2421 { 2422 enum dlm_lockres_list idx; 2423 int nonlocal = 0, node_ref; 2424 struct list_head *queue; 2425 struct dlm_lock *lock; 2426 u64 cookie; 2427 2428 assert_spin_locked(&res->spinlock); 2429 2430 /* delay migration when the lockres is in MIGRATING state */ 2431 if (res->state & DLM_LOCK_RES_MIGRATING) 2432 return 0; 2433 2434 /* delay migration when the lockres is in RECOCERING state */ 2435 if (res->state & DLM_LOCK_RES_RECOVERING) 2436 return 0; 2437 2438 if (res->owner != dlm->node_num) 2439 return 0; 2440 2441 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { 2442 queue = dlm_list_idx_to_ptr(res, idx); 2443 list_for_each_entry(lock, queue, list) { 2444 if (lock->ml.node != dlm->node_num) { 2445 nonlocal++; 2446 continue; 2447 } 2448 cookie = be64_to_cpu(lock->ml.cookie); 2449 mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on " 2450 "%s list\n", dlm->name, res->lockname.len, 2451 res->lockname.name, 2452 dlm_get_lock_cookie_node(cookie), 2453 dlm_get_lock_cookie_seq(cookie), 2454 dlm_list_in_text(idx)); 2455 return 0; 2456 } 2457 } 2458 2459 if (!nonlocal) { 2460 node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 2461 if (node_ref >= O2NM_MAX_NODES) 2462 return 0; 2463 } 2464 2465 mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len, 2466 res->lockname.name); 2467 2468 return 1; 2469 } 2470 2471 /* 2472 * DLM_MIGRATE_LOCKRES 2473 */ 2474 2475 2476 static int dlm_migrate_lockres(struct dlm_ctxt *dlm, 2477 struct dlm_lock_resource *res, u8 target) 2478 { 2479 struct dlm_master_list_entry *mle = NULL; 2480 struct dlm_master_list_entry *oldmle = NULL; 2481 struct dlm_migratable_lockres *mres = NULL; 2482 int ret = 0; 2483 const char *name; 2484 unsigned int namelen; 2485 int mle_added = 0; 2486 int wake = 0; 2487 2488 if (!dlm_grab(dlm)) 2489 return -EINVAL; 2490 2491 BUG_ON(target == O2NM_MAX_NODES); 2492 2493 name = res->lockname.name; 2494 namelen = res->lockname.len; 2495 2496 mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, 2497 target); 2498 2499 /* preallocate up front. if this fails, abort */ 2500 ret = -ENOMEM; 2501 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); 2502 if (!mres) { 2503 mlog_errno(ret); 2504 goto leave; 2505 } 2506 2507 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 2508 if (!mle) { 2509 mlog_errno(ret); 2510 goto leave; 2511 } 2512 ret = 0; 2513 2514 /* 2515 * clear any existing master requests and 2516 * add the migration mle to the list 2517 */ 2518 spin_lock(&dlm->spinlock); 2519 spin_lock(&dlm->master_lock); 2520 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, 2521 namelen, target, dlm->node_num); 2522 spin_unlock(&dlm->master_lock); 2523 spin_unlock(&dlm->spinlock); 2524 2525 if (ret == -EEXIST) { 2526 mlog(0, "another process is already migrating it\n"); 2527 goto fail; 2528 } 2529 mle_added = 1; 2530 2531 /* 2532 * set the MIGRATING flag and flush asts 2533 * if we fail after this we need to re-dirty the lockres 2534 */ 2535 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { 2536 mlog(ML_ERROR, "tried to migrate %.*s to %u, but " 2537 "the target went down.\n", res->lockname.len, 2538 res->lockname.name, target); 2539 spin_lock(&res->spinlock); 2540 res->state &= ~DLM_LOCK_RES_MIGRATING; 2541 wake = 1; 2542 spin_unlock(&res->spinlock); 2543 ret = -EINVAL; 2544 } 2545 2546 fail: 2547 if (oldmle) { 2548 /* master is known, detach if not already detached */ 2549 dlm_mle_detach_hb_events(dlm, oldmle); 2550 dlm_put_mle(oldmle); 2551 } 2552 2553 if (ret < 0) { 2554 if (mle_added) { 2555 dlm_mle_detach_hb_events(dlm, mle); 2556 dlm_put_mle(mle); 2557 } else if (mle) { 2558 kmem_cache_free(dlm_mle_cache, mle); 2559 mle = NULL; 2560 } 2561 goto leave; 2562 } 2563 2564 /* 2565 * at this point, we have a migration target, an mle 2566 * in the master list, and the MIGRATING flag set on 2567 * the lockres 2568 */ 2569 2570 /* now that remote nodes are spinning on the MIGRATING flag, 2571 * ensure that all assert_master work is flushed. */ 2572 flush_workqueue(dlm->dlm_worker); 2573 2574 /* get an extra reference on the mle. 2575 * otherwise the assert_master from the new 2576 * master will destroy this. 2577 * also, make sure that all callers of dlm_get_mle 2578 * take both dlm->spinlock and dlm->master_lock */ 2579 spin_lock(&dlm->spinlock); 2580 spin_lock(&dlm->master_lock); 2581 dlm_get_mle_inuse(mle); 2582 spin_unlock(&dlm->master_lock); 2583 spin_unlock(&dlm->spinlock); 2584 2585 /* notify new node and send all lock state */ 2586 /* call send_one_lockres with migration flag. 2587 * this serves as notice to the target node that a 2588 * migration is starting. */ 2589 ret = dlm_send_one_lockres(dlm, res, mres, target, 2590 DLM_MRES_MIGRATION); 2591 2592 if (ret < 0) { 2593 mlog(0, "migration to node %u failed with %d\n", 2594 target, ret); 2595 /* migration failed, detach and clean up mle */ 2596 dlm_mle_detach_hb_events(dlm, mle); 2597 dlm_put_mle(mle); 2598 dlm_put_mle_inuse(mle); 2599 spin_lock(&res->spinlock); 2600 res->state &= ~DLM_LOCK_RES_MIGRATING; 2601 wake = 1; 2602 spin_unlock(&res->spinlock); 2603 if (dlm_is_host_down(ret)) 2604 dlm_wait_for_node_death(dlm, target, 2605 DLM_NODE_DEATH_WAIT_MAX); 2606 goto leave; 2607 } 2608 2609 /* at this point, the target sends a message to all nodes, 2610 * (using dlm_do_migrate_request). this node is skipped since 2611 * we had to put an mle in the list to begin the process. this 2612 * node now waits for target to do an assert master. this node 2613 * will be the last one notified, ensuring that the migration 2614 * is complete everywhere. if the target dies while this is 2615 * going on, some nodes could potentially see the target as the 2616 * master, so it is important that my recovery finds the migration 2617 * mle and sets the master to UNKNOWN. */ 2618 2619 2620 /* wait for new node to assert master */ 2621 while (1) { 2622 ret = wait_event_interruptible_timeout(mle->wq, 2623 (atomic_read(&mle->woken) == 1), 2624 msecs_to_jiffies(5000)); 2625 2626 if (ret >= 0) { 2627 if (atomic_read(&mle->woken) == 1 || 2628 res->owner == target) 2629 break; 2630 2631 mlog(0, "%s:%.*s: timed out during migration\n", 2632 dlm->name, res->lockname.len, res->lockname.name); 2633 /* avoid hang during shutdown when migrating lockres 2634 * to a node which also goes down */ 2635 if (dlm_is_node_dead(dlm, target)) { 2636 mlog(0, "%s:%.*s: expected migration " 2637 "target %u is no longer up, restarting\n", 2638 dlm->name, res->lockname.len, 2639 res->lockname.name, target); 2640 ret = -EINVAL; 2641 /* migration failed, detach and clean up mle */ 2642 dlm_mle_detach_hb_events(dlm, mle); 2643 dlm_put_mle(mle); 2644 dlm_put_mle_inuse(mle); 2645 spin_lock(&res->spinlock); 2646 res->state &= ~DLM_LOCK_RES_MIGRATING; 2647 wake = 1; 2648 spin_unlock(&res->spinlock); 2649 goto leave; 2650 } 2651 } else 2652 mlog(0, "%s:%.*s: caught signal during migration\n", 2653 dlm->name, res->lockname.len, res->lockname.name); 2654 } 2655 2656 /* all done, set the owner, clear the flag */ 2657 spin_lock(&res->spinlock); 2658 dlm_set_lockres_owner(dlm, res, target); 2659 res->state &= ~DLM_LOCK_RES_MIGRATING; 2660 dlm_remove_nonlocal_locks(dlm, res); 2661 spin_unlock(&res->spinlock); 2662 wake_up(&res->wq); 2663 2664 /* master is known, detach if not already detached */ 2665 dlm_mle_detach_hb_events(dlm, mle); 2666 dlm_put_mle_inuse(mle); 2667 ret = 0; 2668 2669 dlm_lockres_calc_usage(dlm, res); 2670 2671 leave: 2672 /* re-dirty the lockres if we failed */ 2673 if (ret < 0) 2674 dlm_kick_thread(dlm, res); 2675 2676 /* wake up waiters if the MIGRATING flag got set 2677 * but migration failed */ 2678 if (wake) 2679 wake_up(&res->wq); 2680 2681 if (mres) 2682 free_page((unsigned long)mres); 2683 2684 dlm_put(dlm); 2685 2686 mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, 2687 name, target, ret); 2688 return ret; 2689 } 2690 2691 #define DLM_MIGRATION_RETRY_MS 100 2692 2693 /* 2694 * Should be called only after beginning the domain leave process. 2695 * There should not be any remaining locks on nonlocal lock resources, 2696 * and there should be no local locks left on locally mastered resources. 2697 * 2698 * Called with the dlm spinlock held, may drop it to do migration, but 2699 * will re-acquire before exit. 2700 * 2701 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped 2702 */ 2703 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2704 { 2705 int ret; 2706 int lock_dropped = 0; 2707 u8 target = O2NM_MAX_NODES; 2708 2709 assert_spin_locked(&dlm->spinlock); 2710 2711 spin_lock(&res->spinlock); 2712 if (dlm_is_lockres_migrateable(dlm, res)) 2713 target = dlm_pick_migration_target(dlm, res); 2714 spin_unlock(&res->spinlock); 2715 2716 if (target == O2NM_MAX_NODES) 2717 goto leave; 2718 2719 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ 2720 spin_unlock(&dlm->spinlock); 2721 lock_dropped = 1; 2722 ret = dlm_migrate_lockres(dlm, res, target); 2723 if (ret) 2724 mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", 2725 dlm->name, res->lockname.len, res->lockname.name, 2726 target, ret); 2727 spin_lock(&dlm->spinlock); 2728 leave: 2729 return lock_dropped; 2730 } 2731 2732 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2733 { 2734 int ret; 2735 spin_lock(&dlm->ast_lock); 2736 spin_lock(&lock->spinlock); 2737 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); 2738 spin_unlock(&lock->spinlock); 2739 spin_unlock(&dlm->ast_lock); 2740 return ret; 2741 } 2742 2743 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, 2744 struct dlm_lock_resource *res, 2745 u8 mig_target) 2746 { 2747 int can_proceed; 2748 spin_lock(&res->spinlock); 2749 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2750 spin_unlock(&res->spinlock); 2751 2752 /* target has died, so make the caller break out of the 2753 * wait_event, but caller must recheck the domain_map */ 2754 spin_lock(&dlm->spinlock); 2755 if (!test_bit(mig_target, dlm->domain_map)) 2756 can_proceed = 1; 2757 spin_unlock(&dlm->spinlock); 2758 return can_proceed; 2759 } 2760 2761 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, 2762 struct dlm_lock_resource *res) 2763 { 2764 int ret; 2765 spin_lock(&res->spinlock); 2766 ret = !!(res->state & DLM_LOCK_RES_DIRTY); 2767 spin_unlock(&res->spinlock); 2768 return ret; 2769 } 2770 2771 2772 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 2773 struct dlm_lock_resource *res, 2774 u8 target) 2775 { 2776 int ret = 0; 2777 2778 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", 2779 res->lockname.len, res->lockname.name, dlm->node_num, 2780 target); 2781 /* need to set MIGRATING flag on lockres. this is done by 2782 * ensuring that all asts have been flushed for this lockres. */ 2783 spin_lock(&res->spinlock); 2784 BUG_ON(res->migration_pending); 2785 res->migration_pending = 1; 2786 /* strategy is to reserve an extra ast then release 2787 * it below, letting the release do all of the work */ 2788 __dlm_lockres_reserve_ast(res); 2789 spin_unlock(&res->spinlock); 2790 2791 /* now flush all the pending asts */ 2792 dlm_kick_thread(dlm, res); 2793 /* before waiting on DIRTY, block processes which may 2794 * try to dirty the lockres before MIGRATING is set */ 2795 spin_lock(&res->spinlock); 2796 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); 2797 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; 2798 spin_unlock(&res->spinlock); 2799 /* now wait on any pending asts and the DIRTY state */ 2800 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); 2801 dlm_lockres_release_ast(dlm, res); 2802 2803 mlog(0, "about to wait on migration_wq, dirty=%s\n", 2804 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); 2805 /* if the extra ref we just put was the final one, this 2806 * will pass thru immediately. otherwise, we need to wait 2807 * for the last ast to finish. */ 2808 again: 2809 ret = wait_event_interruptible_timeout(dlm->migration_wq, 2810 dlm_migration_can_proceed(dlm, res, target), 2811 msecs_to_jiffies(1000)); 2812 if (ret < 0) { 2813 mlog(0, "woken again: migrating? %s, dead? %s\n", 2814 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2815 test_bit(target, dlm->domain_map) ? "no":"yes"); 2816 } else { 2817 mlog(0, "all is well: migrating? %s, dead? %s\n", 2818 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2819 test_bit(target, dlm->domain_map) ? "no":"yes"); 2820 } 2821 if (!dlm_migration_can_proceed(dlm, res, target)) { 2822 mlog(0, "trying again...\n"); 2823 goto again; 2824 } 2825 2826 ret = 0; 2827 /* did the target go down or die? */ 2828 spin_lock(&dlm->spinlock); 2829 if (!test_bit(target, dlm->domain_map)) { 2830 mlog(ML_ERROR, "aha. migration target %u just went down\n", 2831 target); 2832 ret = -EHOSTDOWN; 2833 } 2834 spin_unlock(&dlm->spinlock); 2835 2836 /* 2837 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for 2838 * another try; otherwise, we are sure the MIGRATING state is there, 2839 * drop the unneded state which blocked threads trying to DIRTY 2840 */ 2841 spin_lock(&res->spinlock); 2842 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); 2843 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2844 if (!ret) 2845 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2846 else 2847 res->migration_pending = 0; 2848 spin_unlock(&res->spinlock); 2849 2850 /* 2851 * at this point: 2852 * 2853 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down 2854 * o there are no pending asts on this lockres 2855 * o all processes trying to reserve an ast on this 2856 * lockres must wait for the MIGRATING flag to clear 2857 */ 2858 return ret; 2859 } 2860 2861 /* last step in the migration process. 2862 * original master calls this to free all of the dlm_lock 2863 * structures that used to be for other nodes. */ 2864 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 2865 struct dlm_lock_resource *res) 2866 { 2867 struct list_head *queue = &res->granted; 2868 int i, bit; 2869 struct dlm_lock *lock, *next; 2870 2871 assert_spin_locked(&res->spinlock); 2872 2873 BUG_ON(res->owner == dlm->node_num); 2874 2875 for (i=0; i<3; i++) { 2876 list_for_each_entry_safe(lock, next, queue, list) { 2877 if (lock->ml.node != dlm->node_num) { 2878 mlog(0, "putting lock for node %u\n", 2879 lock->ml.node); 2880 /* be extra careful */ 2881 BUG_ON(!list_empty(&lock->ast_list)); 2882 BUG_ON(!list_empty(&lock->bast_list)); 2883 BUG_ON(lock->ast_pending); 2884 BUG_ON(lock->bast_pending); 2885 dlm_lockres_clear_refmap_bit(dlm, res, 2886 lock->ml.node); 2887 list_del_init(&lock->list); 2888 dlm_lock_put(lock); 2889 /* In a normal unlock, we would have added a 2890 * DLM_UNLOCK_FREE_LOCK action. Force it. */ 2891 dlm_lock_put(lock); 2892 } 2893 } 2894 queue++; 2895 } 2896 bit = 0; 2897 while (1) { 2898 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); 2899 if (bit >= O2NM_MAX_NODES) 2900 break; 2901 /* do not clear the local node reference, if there is a 2902 * process holding this, let it drop the ref itself */ 2903 if (bit != dlm->node_num) { 2904 mlog(0, "%s:%.*s: node %u had a ref to this " 2905 "migrating lockres, clearing\n", dlm->name, 2906 res->lockname.len, res->lockname.name, bit); 2907 dlm_lockres_clear_refmap_bit(dlm, res, bit); 2908 } 2909 bit++; 2910 } 2911 } 2912 2913 /* 2914 * Pick a node to migrate the lock resource to. This function selects a 2915 * potential target based first on the locks and then on refmap. It skips 2916 * nodes that are in the process of exiting the domain. 2917 */ 2918 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 2919 struct dlm_lock_resource *res) 2920 { 2921 enum dlm_lockres_list idx; 2922 struct list_head *queue = &res->granted; 2923 struct dlm_lock *lock; 2924 int noderef; 2925 u8 nodenum = O2NM_MAX_NODES; 2926 2927 assert_spin_locked(&dlm->spinlock); 2928 assert_spin_locked(&res->spinlock); 2929 2930 /* Go through all the locks */ 2931 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { 2932 queue = dlm_list_idx_to_ptr(res, idx); 2933 list_for_each_entry(lock, queue, list) { 2934 if (lock->ml.node == dlm->node_num) 2935 continue; 2936 if (test_bit(lock->ml.node, dlm->exit_domain_map)) 2937 continue; 2938 nodenum = lock->ml.node; 2939 goto bail; 2940 } 2941 } 2942 2943 /* Go thru the refmap */ 2944 noderef = -1; 2945 while (1) { 2946 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, 2947 noderef + 1); 2948 if (noderef >= O2NM_MAX_NODES) 2949 break; 2950 if (noderef == dlm->node_num) 2951 continue; 2952 if (test_bit(noderef, dlm->exit_domain_map)) 2953 continue; 2954 nodenum = noderef; 2955 goto bail; 2956 } 2957 2958 bail: 2959 return nodenum; 2960 } 2961 2962 /* this is called by the new master once all lockres 2963 * data has been received */ 2964 static int dlm_do_migrate_request(struct dlm_ctxt *dlm, 2965 struct dlm_lock_resource *res, 2966 u8 master, u8 new_master, 2967 struct dlm_node_iter *iter) 2968 { 2969 struct dlm_migrate_request migrate; 2970 int ret, skip, status = 0; 2971 int nodenum; 2972 2973 memset(&migrate, 0, sizeof(migrate)); 2974 migrate.namelen = res->lockname.len; 2975 memcpy(migrate.name, res->lockname.name, migrate.namelen); 2976 migrate.new_master = new_master; 2977 migrate.master = master; 2978 2979 ret = 0; 2980 2981 /* send message to all nodes, except the master and myself */ 2982 while ((nodenum = dlm_node_iter_next(iter)) >= 0) { 2983 if (nodenum == master || 2984 nodenum == new_master) 2985 continue; 2986 2987 /* We could race exit domain. If exited, skip. */ 2988 spin_lock(&dlm->spinlock); 2989 skip = (!test_bit(nodenum, dlm->domain_map)); 2990 spin_unlock(&dlm->spinlock); 2991 if (skip) { 2992 clear_bit(nodenum, iter->node_map); 2993 continue; 2994 } 2995 2996 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, 2997 &migrate, sizeof(migrate), nodenum, 2998 &status); 2999 if (ret < 0) { 3000 mlog(ML_ERROR, "%s: res %.*s, Error %d send " 3001 "MIGRATE_REQUEST to node %u\n", dlm->name, 3002 migrate.namelen, migrate.name, ret, nodenum); 3003 if (!dlm_is_host_down(ret)) { 3004 mlog(ML_ERROR, "unhandled error=%d!\n", ret); 3005 BUG(); 3006 } 3007 clear_bit(nodenum, iter->node_map); 3008 ret = 0; 3009 } else if (status < 0) { 3010 mlog(0, "migrate request (node %u) returned %d!\n", 3011 nodenum, status); 3012 ret = status; 3013 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { 3014 /* during the migration request we short-circuited 3015 * the mastery of the lockres. make sure we have 3016 * a mastery ref for nodenum */ 3017 mlog(0, "%s:%.*s: need ref for node %u\n", 3018 dlm->name, res->lockname.len, res->lockname.name, 3019 nodenum); 3020 spin_lock(&res->spinlock); 3021 dlm_lockres_set_refmap_bit(dlm, res, nodenum); 3022 spin_unlock(&res->spinlock); 3023 } 3024 } 3025 3026 if (ret < 0) 3027 mlog_errno(ret); 3028 3029 mlog(0, "returning ret=%d\n", ret); 3030 return ret; 3031 } 3032 3033 3034 /* if there is an existing mle for this lockres, we now know who the master is. 3035 * (the one who sent us *this* message) we can clear it up right away. 3036 * since the process that put the mle on the list still has a reference to it, 3037 * we can unhash it now, set the master and wake the process. as a result, 3038 * we will have no mle in the list to start with. now we can add an mle for 3039 * the migration and this should be the only one found for those scanning the 3040 * list. */ 3041 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, 3042 void **ret_data) 3043 { 3044 struct dlm_ctxt *dlm = data; 3045 struct dlm_lock_resource *res = NULL; 3046 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; 3047 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; 3048 const char *name; 3049 unsigned int namelen, hash; 3050 int ret = 0; 3051 3052 if (!dlm_grab(dlm)) 3053 return -EINVAL; 3054 3055 name = migrate->name; 3056 namelen = migrate->namelen; 3057 hash = dlm_lockid_hash(name, namelen); 3058 3059 /* preallocate.. if this fails, abort */ 3060 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 3061 3062 if (!mle) { 3063 ret = -ENOMEM; 3064 goto leave; 3065 } 3066 3067 /* check for pre-existing lock */ 3068 spin_lock(&dlm->spinlock); 3069 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 3070 if (res) { 3071 spin_lock(&res->spinlock); 3072 if (res->state & DLM_LOCK_RES_RECOVERING) { 3073 /* if all is working ok, this can only mean that we got 3074 * a migrate request from a node that we now see as 3075 * dead. what can we do here? drop it to the floor? */ 3076 spin_unlock(&res->spinlock); 3077 mlog(ML_ERROR, "Got a migrate request, but the " 3078 "lockres is marked as recovering!"); 3079 kmem_cache_free(dlm_mle_cache, mle); 3080 ret = -EINVAL; /* need a better solution */ 3081 goto unlock; 3082 } 3083 res->state |= DLM_LOCK_RES_MIGRATING; 3084 spin_unlock(&res->spinlock); 3085 } 3086 3087 spin_lock(&dlm->master_lock); 3088 /* ignore status. only nonzero status would BUG. */ 3089 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 3090 name, namelen, 3091 migrate->new_master, 3092 migrate->master); 3093 3094 spin_unlock(&dlm->master_lock); 3095 unlock: 3096 spin_unlock(&dlm->spinlock); 3097 3098 if (oldmle) { 3099 /* master is known, detach if not already detached */ 3100 dlm_mle_detach_hb_events(dlm, oldmle); 3101 dlm_put_mle(oldmle); 3102 } 3103 3104 if (res) 3105 dlm_lockres_put(res); 3106 leave: 3107 dlm_put(dlm); 3108 return ret; 3109 } 3110 3111 /* must be holding dlm->spinlock and dlm->master_lock 3112 * when adding a migration mle, we can clear any other mles 3113 * in the master list because we know with certainty that 3114 * the master is "master". so we remove any old mle from 3115 * the list after setting it's master field, and then add 3116 * the new migration mle. this way we can hold with the rule 3117 * of having only one mle for a given lock name at all times. */ 3118 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 3119 struct dlm_lock_resource *res, 3120 struct dlm_master_list_entry *mle, 3121 struct dlm_master_list_entry **oldmle, 3122 const char *name, unsigned int namelen, 3123 u8 new_master, u8 master) 3124 { 3125 int found; 3126 int ret = 0; 3127 3128 *oldmle = NULL; 3129 3130 assert_spin_locked(&dlm->spinlock); 3131 assert_spin_locked(&dlm->master_lock); 3132 3133 /* caller is responsible for any ref taken here on oldmle */ 3134 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); 3135 if (found) { 3136 struct dlm_master_list_entry *tmp = *oldmle; 3137 spin_lock(&tmp->spinlock); 3138 if (tmp->type == DLM_MLE_MIGRATION) { 3139 if (master == dlm->node_num) { 3140 /* ah another process raced me to it */ 3141 mlog(0, "tried to migrate %.*s, but some " 3142 "process beat me to it\n", 3143 namelen, name); 3144 ret = -EEXIST; 3145 } else { 3146 /* bad. 2 NODES are trying to migrate! */ 3147 mlog(ML_ERROR, "migration error mle: " 3148 "master=%u new_master=%u // request: " 3149 "master=%u new_master=%u // " 3150 "lockres=%.*s\n", 3151 tmp->master, tmp->new_master, 3152 master, new_master, 3153 namelen, name); 3154 BUG(); 3155 } 3156 } else { 3157 /* this is essentially what assert_master does */ 3158 tmp->master = master; 3159 atomic_set(&tmp->woken, 1); 3160 wake_up(&tmp->wq); 3161 /* remove it so that only one mle will be found */ 3162 __dlm_unlink_mle(dlm, tmp); 3163 __dlm_mle_detach_hb_events(dlm, tmp); 3164 if (tmp->type == DLM_MLE_MASTER) { 3165 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3166 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3167 "telling master to get ref " 3168 "for cleared out mle during " 3169 "migration\n", dlm->name, 3170 namelen, name, master, 3171 new_master); 3172 } 3173 } 3174 spin_unlock(&tmp->spinlock); 3175 } 3176 3177 /* now add a migration mle to the tail of the list */ 3178 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); 3179 mle->new_master = new_master; 3180 /* the new master will be sending an assert master for this. 3181 * at that point we will get the refmap reference */ 3182 mle->master = master; 3183 /* do this for consistency with other mle types */ 3184 set_bit(new_master, mle->maybe_map); 3185 __dlm_insert_mle(dlm, mle); 3186 3187 return ret; 3188 } 3189 3190 /* 3191 * Sets the owner of the lockres, associated to the mle, to UNKNOWN 3192 */ 3193 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm, 3194 struct dlm_master_list_entry *mle) 3195 { 3196 struct dlm_lock_resource *res; 3197 3198 /* Find the lockres associated to the mle and set its owner to UNK */ 3199 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, 3200 mle->mnamehash); 3201 if (res) { 3202 spin_unlock(&dlm->master_lock); 3203 3204 /* move lockres onto recovery list */ 3205 spin_lock(&res->spinlock); 3206 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 3207 dlm_move_lockres_to_recovery_list(dlm, res); 3208 spin_unlock(&res->spinlock); 3209 dlm_lockres_put(res); 3210 3211 /* about to get rid of mle, detach from heartbeat */ 3212 __dlm_mle_detach_hb_events(dlm, mle); 3213 3214 /* dump the mle */ 3215 spin_lock(&dlm->master_lock); 3216 __dlm_put_mle(mle); 3217 spin_unlock(&dlm->master_lock); 3218 } 3219 3220 return res; 3221 } 3222 3223 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm, 3224 struct dlm_master_list_entry *mle) 3225 { 3226 __dlm_mle_detach_hb_events(dlm, mle); 3227 3228 spin_lock(&mle->spinlock); 3229 __dlm_unlink_mle(dlm, mle); 3230 atomic_set(&mle->woken, 1); 3231 spin_unlock(&mle->spinlock); 3232 3233 wake_up(&mle->wq); 3234 } 3235 3236 static void dlm_clean_block_mle(struct dlm_ctxt *dlm, 3237 struct dlm_master_list_entry *mle, u8 dead_node) 3238 { 3239 int bit; 3240 3241 BUG_ON(mle->type != DLM_MLE_BLOCK); 3242 3243 spin_lock(&mle->spinlock); 3244 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 3245 if (bit != dead_node) { 3246 mlog(0, "mle found, but dead node %u would not have been " 3247 "master\n", dead_node); 3248 spin_unlock(&mle->spinlock); 3249 } else { 3250 /* Must drop the refcount by one since the assert_master will 3251 * never arrive. This may result in the mle being unlinked and 3252 * freed, but there may still be a process waiting in the 3253 * dlmlock path which is fine. */ 3254 mlog(0, "node %u was expected master\n", dead_node); 3255 atomic_set(&mle->woken, 1); 3256 spin_unlock(&mle->spinlock); 3257 wake_up(&mle->wq); 3258 3259 /* Do not need events any longer, so detach from heartbeat */ 3260 __dlm_mle_detach_hb_events(dlm, mle); 3261 __dlm_put_mle(mle); 3262 } 3263 } 3264 3265 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) 3266 { 3267 struct dlm_master_list_entry *mle; 3268 struct dlm_lock_resource *res; 3269 struct hlist_head *bucket; 3270 struct hlist_node *tmp; 3271 unsigned int i; 3272 3273 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); 3274 top: 3275 assert_spin_locked(&dlm->spinlock); 3276 3277 /* clean the master list */ 3278 spin_lock(&dlm->master_lock); 3279 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3280 bucket = dlm_master_hash(dlm, i); 3281 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { 3282 BUG_ON(mle->type != DLM_MLE_BLOCK && 3283 mle->type != DLM_MLE_MASTER && 3284 mle->type != DLM_MLE_MIGRATION); 3285 3286 /* MASTER mles are initiated locally. The waiting 3287 * process will notice the node map change shortly. 3288 * Let that happen as normal. */ 3289 if (mle->type == DLM_MLE_MASTER) 3290 continue; 3291 3292 /* BLOCK mles are initiated by other nodes. Need to 3293 * clean up if the dead node would have been the 3294 * master. */ 3295 if (mle->type == DLM_MLE_BLOCK) { 3296 dlm_clean_block_mle(dlm, mle, dead_node); 3297 continue; 3298 } 3299 3300 /* Everything else is a MIGRATION mle */ 3301 3302 /* The rule for MIGRATION mles is that the master 3303 * becomes UNKNOWN if *either* the original or the new 3304 * master dies. All UNKNOWN lockres' are sent to 3305 * whichever node becomes the recovery master. The new 3306 * master is responsible for determining if there is 3307 * still a master for this lockres, or if he needs to 3308 * take over mastery. Either way, this node should 3309 * expect another message to resolve this. */ 3310 3311 if (mle->master != dead_node && 3312 mle->new_master != dead_node) 3313 continue; 3314 3315 /* If we have reached this point, this mle needs to be 3316 * removed from the list and freed. */ 3317 dlm_clean_migration_mle(dlm, mle); 3318 3319 mlog(0, "%s: node %u died during migration from " 3320 "%u to %u!\n", dlm->name, dead_node, mle->master, 3321 mle->new_master); 3322 3323 /* If we find a lockres associated with the mle, we've 3324 * hit this rare case that messes up our lock ordering. 3325 * If so, we need to drop the master lock so that we can 3326 * take the lockres lock, meaning that we will have to 3327 * restart from the head of list. */ 3328 res = dlm_reset_mleres_owner(dlm, mle); 3329 if (res) 3330 /* restart */ 3331 goto top; 3332 3333 /* This may be the last reference */ 3334 __dlm_put_mle(mle); 3335 } 3336 } 3337 spin_unlock(&dlm->master_lock); 3338 } 3339 3340 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 3341 u8 old_master) 3342 { 3343 struct dlm_node_iter iter; 3344 int ret = 0; 3345 3346 spin_lock(&dlm->spinlock); 3347 dlm_node_iter_init(dlm->domain_map, &iter); 3348 clear_bit(old_master, iter.node_map); 3349 clear_bit(dlm->node_num, iter.node_map); 3350 spin_unlock(&dlm->spinlock); 3351 3352 /* ownership of the lockres is changing. account for the 3353 * mastery reference here since old_master will briefly have 3354 * a reference after the migration completes */ 3355 spin_lock(&res->spinlock); 3356 dlm_lockres_set_refmap_bit(dlm, res, old_master); 3357 spin_unlock(&res->spinlock); 3358 3359 mlog(0, "now time to do a migrate request to other nodes\n"); 3360 ret = dlm_do_migrate_request(dlm, res, old_master, 3361 dlm->node_num, &iter); 3362 if (ret < 0) { 3363 mlog_errno(ret); 3364 goto leave; 3365 } 3366 3367 mlog(0, "doing assert master of %.*s to all except the original node\n", 3368 res->lockname.len, res->lockname.name); 3369 /* this call now finishes out the nodemap 3370 * even if one or more nodes die */ 3371 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3372 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3373 if (ret < 0) { 3374 /* no longer need to retry. all living nodes contacted. */ 3375 mlog_errno(ret); 3376 ret = 0; 3377 } 3378 3379 memset(iter.node_map, 0, sizeof(iter.node_map)); 3380 set_bit(old_master, iter.node_map); 3381 mlog(0, "doing assert master of %.*s back to %u\n", 3382 res->lockname.len, res->lockname.name, old_master); 3383 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3384 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3385 if (ret < 0) { 3386 mlog(0, "assert master to original master failed " 3387 "with %d.\n", ret); 3388 /* the only nonzero status here would be because of 3389 * a dead original node. we're done. */ 3390 ret = 0; 3391 } 3392 3393 /* all done, set the owner, clear the flag */ 3394 spin_lock(&res->spinlock); 3395 dlm_set_lockres_owner(dlm, res, dlm->node_num); 3396 res->state &= ~DLM_LOCK_RES_MIGRATING; 3397 spin_unlock(&res->spinlock); 3398 /* re-dirty it on the new master */ 3399 dlm_kick_thread(dlm, res); 3400 wake_up(&res->wq); 3401 leave: 3402 return ret; 3403 } 3404 3405 /* 3406 * LOCKRES AST REFCOUNT 3407 * this is integral to migration 3408 */ 3409 3410 /* for future intent to call an ast, reserve one ahead of time. 3411 * this should be called only after waiting on the lockres 3412 * with dlm_wait_on_lockres, and while still holding the 3413 * spinlock after the call. */ 3414 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) 3415 { 3416 assert_spin_locked(&res->spinlock); 3417 if (res->state & DLM_LOCK_RES_MIGRATING) { 3418 __dlm_print_one_lock_resource(res); 3419 } 3420 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3421 3422 atomic_inc(&res->asts_reserved); 3423 } 3424 3425 /* 3426 * used to drop the reserved ast, either because it went unused, 3427 * or because the ast/bast was actually called. 3428 * 3429 * also, if there is a pending migration on this lockres, 3430 * and this was the last pending ast on the lockres, 3431 * atomically set the MIGRATING flag before we drop the lock. 3432 * this is how we ensure that migration can proceed with no 3433 * asts in progress. note that it is ok if the state of the 3434 * queues is such that a lock should be granted in the future 3435 * or that a bast should be fired, because the new master will 3436 * shuffle the lists on this lockres as soon as it is migrated. 3437 */ 3438 void dlm_lockres_release_ast(struct dlm_ctxt *dlm, 3439 struct dlm_lock_resource *res) 3440 { 3441 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) 3442 return; 3443 3444 if (!res->migration_pending) { 3445 spin_unlock(&res->spinlock); 3446 return; 3447 } 3448 3449 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3450 res->migration_pending = 0; 3451 res->state |= DLM_LOCK_RES_MIGRATING; 3452 spin_unlock(&res->spinlock); 3453 wake_up(&res->wq); 3454 wake_up(&dlm->migration_wq); 3455 } 3456 3457 void dlm_force_free_mles(struct dlm_ctxt *dlm) 3458 { 3459 int i; 3460 struct hlist_head *bucket; 3461 struct dlm_master_list_entry *mle; 3462 struct hlist_node *tmp; 3463 3464 /* 3465 * We notified all other nodes that we are exiting the domain and 3466 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still 3467 * around we force free them and wake any processes that are waiting 3468 * on the mles 3469 */ 3470 spin_lock(&dlm->spinlock); 3471 spin_lock(&dlm->master_lock); 3472 3473 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); 3474 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); 3475 3476 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3477 bucket = dlm_master_hash(dlm, i); 3478 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { 3479 if (mle->type != DLM_MLE_BLOCK) { 3480 mlog(ML_ERROR, "bad mle: %p\n", mle); 3481 dlm_print_one_mle(mle); 3482 } 3483 atomic_set(&mle->woken, 1); 3484 wake_up(&mle->wq); 3485 3486 __dlm_unlink_mle(dlm, mle); 3487 __dlm_mle_detach_hb_events(dlm, mle); 3488 __dlm_put_mle(mle); 3489 } 3490 } 3491 spin_unlock(&dlm->master_lock); 3492 spin_unlock(&dlm->spinlock); 3493 } 3494