1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmmod.c 5 * 6 * standalone DLM module 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/init.h> 34 #include <linux/sysctl.h> 35 #include <linux/random.h> 36 #include <linux/blkdev.h> 37 #include <linux/socket.h> 38 #include <linux/inet.h> 39 #include <linux/spinlock.h> 40 #include <linux/delay.h> 41 42 43 #include "cluster/heartbeat.h" 44 #include "cluster/nodemanager.h" 45 #include "cluster/tcp.h" 46 47 #include "dlmapi.h" 48 #include "dlmcommon.h" 49 #include "dlmdomain.h" 50 #include "dlmdebug.h" 51 52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) 53 #include "cluster/masklog.h" 54 55 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 56 struct dlm_master_list_entry *mle, 57 struct o2nm_node *node, 58 int idx); 59 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 60 struct dlm_master_list_entry *mle, 61 struct o2nm_node *node, 62 int idx); 63 64 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); 65 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 66 struct dlm_lock_resource *res, 67 void *nodemap, u32 flags); 68 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); 69 70 static inline int dlm_mle_equal(struct dlm_ctxt *dlm, 71 struct dlm_master_list_entry *mle, 72 const char *name, 73 unsigned int namelen) 74 { 75 if (dlm != mle->dlm) 76 return 0; 77 78 if (namelen != mle->mnamelen || 79 memcmp(name, mle->mname, namelen) != 0) 80 return 0; 81 82 return 1; 83 } 84 85 static struct kmem_cache *dlm_lockres_cache = NULL; 86 static struct kmem_cache *dlm_lockname_cache = NULL; 87 static struct kmem_cache *dlm_mle_cache = NULL; 88 89 static void dlm_mle_release(struct kref *kref); 90 static void dlm_init_mle(struct dlm_master_list_entry *mle, 91 enum dlm_mle_type type, 92 struct dlm_ctxt *dlm, 93 struct dlm_lock_resource *res, 94 const char *name, 95 unsigned int namelen); 96 static void dlm_put_mle(struct dlm_master_list_entry *mle); 97 static void __dlm_put_mle(struct dlm_master_list_entry *mle); 98 static int dlm_find_mle(struct dlm_ctxt *dlm, 99 struct dlm_master_list_entry **mle, 100 char *name, unsigned int namelen); 101 102 static int dlm_do_master_request(struct dlm_lock_resource *res, 103 struct dlm_master_list_entry *mle, int to); 104 105 106 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 107 struct dlm_lock_resource *res, 108 struct dlm_master_list_entry *mle, 109 int *blocked); 110 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 111 struct dlm_lock_resource *res, 112 struct dlm_master_list_entry *mle, 113 int blocked); 114 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 115 struct dlm_lock_resource *res, 116 struct dlm_master_list_entry *mle, 117 struct dlm_master_list_entry **oldmle, 118 const char *name, unsigned int namelen, 119 u8 new_master, u8 master); 120 121 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 122 struct dlm_lock_resource *res); 123 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 124 struct dlm_lock_resource *res); 125 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 126 struct dlm_lock_resource *res, 127 u8 target); 128 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 129 struct dlm_lock_resource *res); 130 131 132 int dlm_is_host_down(int errno) 133 { 134 switch (errno) { 135 case -EBADF: 136 case -ECONNREFUSED: 137 case -ENOTCONN: 138 case -ECONNRESET: 139 case -EPIPE: 140 case -EHOSTDOWN: 141 case -EHOSTUNREACH: 142 case -ETIMEDOUT: 143 case -ECONNABORTED: 144 case -ENETDOWN: 145 case -ENETUNREACH: 146 case -ENETRESET: 147 case -ESHUTDOWN: 148 case -ENOPROTOOPT: 149 case -EINVAL: /* if returned from our tcp code, 150 this means there is no socket */ 151 return 1; 152 } 153 return 0; 154 } 155 156 157 /* 158 * MASTER LIST FUNCTIONS 159 */ 160 161 162 /* 163 * regarding master list entries and heartbeat callbacks: 164 * 165 * in order to avoid sleeping and allocation that occurs in 166 * heartbeat, master list entries are simply attached to the 167 * dlm's established heartbeat callbacks. the mle is attached 168 * when it is created, and since the dlm->spinlock is held at 169 * that time, any heartbeat event will be properly discovered 170 * by the mle. the mle needs to be detached from the 171 * dlm->mle_hb_events list as soon as heartbeat events are no 172 * longer useful to the mle, and before the mle is freed. 173 * 174 * as a general rule, heartbeat events are no longer needed by 175 * the mle once an "answer" regarding the lock master has been 176 * received. 177 */ 178 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, 179 struct dlm_master_list_entry *mle) 180 { 181 assert_spin_locked(&dlm->spinlock); 182 183 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); 184 } 185 186 187 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 188 struct dlm_master_list_entry *mle) 189 { 190 if (!list_empty(&mle->hb_events)) 191 list_del_init(&mle->hb_events); 192 } 193 194 195 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 196 struct dlm_master_list_entry *mle) 197 { 198 spin_lock(&dlm->spinlock); 199 __dlm_mle_detach_hb_events(dlm, mle); 200 spin_unlock(&dlm->spinlock); 201 } 202 203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) 204 { 205 struct dlm_ctxt *dlm; 206 dlm = mle->dlm; 207 208 assert_spin_locked(&dlm->spinlock); 209 assert_spin_locked(&dlm->master_lock); 210 mle->inuse++; 211 kref_get(&mle->mle_refs); 212 } 213 214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) 215 { 216 struct dlm_ctxt *dlm; 217 dlm = mle->dlm; 218 219 spin_lock(&dlm->spinlock); 220 spin_lock(&dlm->master_lock); 221 mle->inuse--; 222 __dlm_put_mle(mle); 223 spin_unlock(&dlm->master_lock); 224 spin_unlock(&dlm->spinlock); 225 226 } 227 228 /* remove from list and free */ 229 static void __dlm_put_mle(struct dlm_master_list_entry *mle) 230 { 231 struct dlm_ctxt *dlm; 232 dlm = mle->dlm; 233 234 assert_spin_locked(&dlm->spinlock); 235 assert_spin_locked(&dlm->master_lock); 236 if (!atomic_read(&mle->mle_refs.refcount)) { 237 /* this may or may not crash, but who cares. 238 * it's a BUG. */ 239 mlog(ML_ERROR, "bad mle: %p\n", mle); 240 dlm_print_one_mle(mle); 241 BUG(); 242 } else 243 kref_put(&mle->mle_refs, dlm_mle_release); 244 } 245 246 247 /* must not have any spinlocks coming in */ 248 static void dlm_put_mle(struct dlm_master_list_entry *mle) 249 { 250 struct dlm_ctxt *dlm; 251 dlm = mle->dlm; 252 253 spin_lock(&dlm->spinlock); 254 spin_lock(&dlm->master_lock); 255 __dlm_put_mle(mle); 256 spin_unlock(&dlm->master_lock); 257 spin_unlock(&dlm->spinlock); 258 } 259 260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) 261 { 262 kref_get(&mle->mle_refs); 263 } 264 265 static void dlm_init_mle(struct dlm_master_list_entry *mle, 266 enum dlm_mle_type type, 267 struct dlm_ctxt *dlm, 268 struct dlm_lock_resource *res, 269 const char *name, 270 unsigned int namelen) 271 { 272 assert_spin_locked(&dlm->spinlock); 273 274 mle->dlm = dlm; 275 mle->type = type; 276 INIT_HLIST_NODE(&mle->master_hash_node); 277 INIT_LIST_HEAD(&mle->hb_events); 278 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 279 spin_lock_init(&mle->spinlock); 280 init_waitqueue_head(&mle->wq); 281 atomic_set(&mle->woken, 0); 282 kref_init(&mle->mle_refs); 283 memset(mle->response_map, 0, sizeof(mle->response_map)); 284 mle->master = O2NM_MAX_NODES; 285 mle->new_master = O2NM_MAX_NODES; 286 mle->inuse = 0; 287 288 BUG_ON(mle->type != DLM_MLE_BLOCK && 289 mle->type != DLM_MLE_MASTER && 290 mle->type != DLM_MLE_MIGRATION); 291 292 if (mle->type == DLM_MLE_MASTER) { 293 BUG_ON(!res); 294 mle->mleres = res; 295 memcpy(mle->mname, res->lockname.name, res->lockname.len); 296 mle->mnamelen = res->lockname.len; 297 mle->mnamehash = res->lockname.hash; 298 } else { 299 BUG_ON(!name); 300 mle->mleres = NULL; 301 memcpy(mle->mname, name, namelen); 302 mle->mnamelen = namelen; 303 mle->mnamehash = dlm_lockid_hash(name, namelen); 304 } 305 306 atomic_inc(&dlm->mle_tot_count[mle->type]); 307 atomic_inc(&dlm->mle_cur_count[mle->type]); 308 309 /* copy off the node_map and register hb callbacks on our copy */ 310 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); 311 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); 312 clear_bit(dlm->node_num, mle->vote_map); 313 clear_bit(dlm->node_num, mle->node_map); 314 315 /* attach the mle to the domain node up/down events */ 316 __dlm_mle_attach_hb_events(dlm, mle); 317 } 318 319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) 320 { 321 assert_spin_locked(&dlm->spinlock); 322 assert_spin_locked(&dlm->master_lock); 323 324 if (!hlist_unhashed(&mle->master_hash_node)) 325 hlist_del_init(&mle->master_hash_node); 326 } 327 328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) 329 { 330 struct hlist_head *bucket; 331 332 assert_spin_locked(&dlm->master_lock); 333 334 bucket = dlm_master_hash(dlm, mle->mnamehash); 335 hlist_add_head(&mle->master_hash_node, bucket); 336 } 337 338 /* returns 1 if found, 0 if not */ 339 static int dlm_find_mle(struct dlm_ctxt *dlm, 340 struct dlm_master_list_entry **mle, 341 char *name, unsigned int namelen) 342 { 343 struct dlm_master_list_entry *tmpmle; 344 struct hlist_head *bucket; 345 unsigned int hash; 346 347 assert_spin_locked(&dlm->master_lock); 348 349 hash = dlm_lockid_hash(name, namelen); 350 bucket = dlm_master_hash(dlm, hash); 351 hlist_for_each_entry(tmpmle, bucket, master_hash_node) { 352 if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) 353 continue; 354 dlm_get_mle(tmpmle); 355 *mle = tmpmle; 356 return 1; 357 } 358 return 0; 359 } 360 361 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) 362 { 363 struct dlm_master_list_entry *mle; 364 365 assert_spin_locked(&dlm->spinlock); 366 367 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 368 if (node_up) 369 dlm_mle_node_up(dlm, mle, NULL, idx); 370 else 371 dlm_mle_node_down(dlm, mle, NULL, idx); 372 } 373 } 374 375 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 376 struct dlm_master_list_entry *mle, 377 struct o2nm_node *node, int idx) 378 { 379 spin_lock(&mle->spinlock); 380 381 if (!test_bit(idx, mle->node_map)) 382 mlog(0, "node %u already removed from nodemap!\n", idx); 383 else 384 clear_bit(idx, mle->node_map); 385 386 spin_unlock(&mle->spinlock); 387 } 388 389 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 390 struct dlm_master_list_entry *mle, 391 struct o2nm_node *node, int idx) 392 { 393 spin_lock(&mle->spinlock); 394 395 if (test_bit(idx, mle->node_map)) 396 mlog(0, "node %u already in node map!\n", idx); 397 else 398 set_bit(idx, mle->node_map); 399 400 spin_unlock(&mle->spinlock); 401 } 402 403 404 int dlm_init_mle_cache(void) 405 { 406 dlm_mle_cache = kmem_cache_create("o2dlm_mle", 407 sizeof(struct dlm_master_list_entry), 408 0, SLAB_HWCACHE_ALIGN, 409 NULL); 410 if (dlm_mle_cache == NULL) 411 return -ENOMEM; 412 return 0; 413 } 414 415 void dlm_destroy_mle_cache(void) 416 { 417 if (dlm_mle_cache) 418 kmem_cache_destroy(dlm_mle_cache); 419 } 420 421 static void dlm_mle_release(struct kref *kref) 422 { 423 struct dlm_master_list_entry *mle; 424 struct dlm_ctxt *dlm; 425 426 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 427 dlm = mle->dlm; 428 429 assert_spin_locked(&dlm->spinlock); 430 assert_spin_locked(&dlm->master_lock); 431 432 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, 433 mle->type); 434 435 /* remove from list if not already */ 436 __dlm_unlink_mle(dlm, mle); 437 438 /* detach the mle from the domain node up/down events */ 439 __dlm_mle_detach_hb_events(dlm, mle); 440 441 atomic_dec(&dlm->mle_cur_count[mle->type]); 442 443 /* NOTE: kfree under spinlock here. 444 * if this is bad, we can move this to a freelist. */ 445 kmem_cache_free(dlm_mle_cache, mle); 446 } 447 448 449 /* 450 * LOCK RESOURCE FUNCTIONS 451 */ 452 453 int dlm_init_master_caches(void) 454 { 455 dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", 456 sizeof(struct dlm_lock_resource), 457 0, SLAB_HWCACHE_ALIGN, NULL); 458 if (!dlm_lockres_cache) 459 goto bail; 460 461 dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", 462 DLM_LOCKID_NAME_MAX, 0, 463 SLAB_HWCACHE_ALIGN, NULL); 464 if (!dlm_lockname_cache) 465 goto bail; 466 467 return 0; 468 bail: 469 dlm_destroy_master_caches(); 470 return -ENOMEM; 471 } 472 473 void dlm_destroy_master_caches(void) 474 { 475 if (dlm_lockname_cache) 476 kmem_cache_destroy(dlm_lockname_cache); 477 478 if (dlm_lockres_cache) 479 kmem_cache_destroy(dlm_lockres_cache); 480 } 481 482 static void dlm_lockres_release(struct kref *kref) 483 { 484 struct dlm_lock_resource *res; 485 struct dlm_ctxt *dlm; 486 487 res = container_of(kref, struct dlm_lock_resource, refs); 488 dlm = res->dlm; 489 490 /* This should not happen -- all lockres' have a name 491 * associated with them at init time. */ 492 BUG_ON(!res->lockname.name); 493 494 mlog(0, "destroying lockres %.*s\n", res->lockname.len, 495 res->lockname.name); 496 497 spin_lock(&dlm->track_lock); 498 if (!list_empty(&res->tracking)) 499 list_del_init(&res->tracking); 500 else { 501 mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n", 502 res->lockname.len, res->lockname.name); 503 dlm_print_one_lock_resource(res); 504 } 505 spin_unlock(&dlm->track_lock); 506 507 atomic_dec(&dlm->res_cur_count); 508 509 if (!hlist_unhashed(&res->hash_node) || 510 !list_empty(&res->granted) || 511 !list_empty(&res->converting) || 512 !list_empty(&res->blocked) || 513 !list_empty(&res->dirty) || 514 !list_empty(&res->recovering) || 515 !list_empty(&res->purge)) { 516 mlog(ML_ERROR, 517 "Going to BUG for resource %.*s." 518 " We're on a list! [%c%c%c%c%c%c%c]\n", 519 res->lockname.len, res->lockname.name, 520 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', 521 !list_empty(&res->granted) ? 'G' : ' ', 522 !list_empty(&res->converting) ? 'C' : ' ', 523 !list_empty(&res->blocked) ? 'B' : ' ', 524 !list_empty(&res->dirty) ? 'D' : ' ', 525 !list_empty(&res->recovering) ? 'R' : ' ', 526 !list_empty(&res->purge) ? 'P' : ' '); 527 528 dlm_print_one_lock_resource(res); 529 } 530 531 /* By the time we're ready to blow this guy away, we shouldn't 532 * be on any lists. */ 533 BUG_ON(!hlist_unhashed(&res->hash_node)); 534 BUG_ON(!list_empty(&res->granted)); 535 BUG_ON(!list_empty(&res->converting)); 536 BUG_ON(!list_empty(&res->blocked)); 537 BUG_ON(!list_empty(&res->dirty)); 538 BUG_ON(!list_empty(&res->recovering)); 539 BUG_ON(!list_empty(&res->purge)); 540 541 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); 542 543 kmem_cache_free(dlm_lockres_cache, res); 544 } 545 546 void dlm_lockres_put(struct dlm_lock_resource *res) 547 { 548 kref_put(&res->refs, dlm_lockres_release); 549 } 550 551 static void dlm_init_lockres(struct dlm_ctxt *dlm, 552 struct dlm_lock_resource *res, 553 const char *name, unsigned int namelen) 554 { 555 char *qname; 556 557 /* If we memset here, we lose our reference to the kmalloc'd 558 * res->lockname.name, so be sure to init every field 559 * correctly! */ 560 561 qname = (char *) res->lockname.name; 562 memcpy(qname, name, namelen); 563 564 res->lockname.len = namelen; 565 res->lockname.hash = dlm_lockid_hash(name, namelen); 566 567 init_waitqueue_head(&res->wq); 568 spin_lock_init(&res->spinlock); 569 INIT_HLIST_NODE(&res->hash_node); 570 INIT_LIST_HEAD(&res->granted); 571 INIT_LIST_HEAD(&res->converting); 572 INIT_LIST_HEAD(&res->blocked); 573 INIT_LIST_HEAD(&res->dirty); 574 INIT_LIST_HEAD(&res->recovering); 575 INIT_LIST_HEAD(&res->purge); 576 INIT_LIST_HEAD(&res->tracking); 577 atomic_set(&res->asts_reserved, 0); 578 res->migration_pending = 0; 579 res->inflight_locks = 0; 580 581 res->dlm = dlm; 582 583 kref_init(&res->refs); 584 585 atomic_inc(&dlm->res_tot_count); 586 atomic_inc(&dlm->res_cur_count); 587 588 /* just for consistency */ 589 spin_lock(&res->spinlock); 590 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 591 spin_unlock(&res->spinlock); 592 593 res->state = DLM_LOCK_RES_IN_PROGRESS; 594 595 res->last_used = 0; 596 597 spin_lock(&dlm->spinlock); 598 list_add_tail(&res->tracking, &dlm->tracking_list); 599 spin_unlock(&dlm->spinlock); 600 601 memset(res->lvb, 0, DLM_LVB_LEN); 602 memset(res->refmap, 0, sizeof(res->refmap)); 603 } 604 605 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, 606 const char *name, 607 unsigned int namelen) 608 { 609 struct dlm_lock_resource *res = NULL; 610 611 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); 612 if (!res) 613 goto error; 614 615 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); 616 if (!res->lockname.name) 617 goto error; 618 619 dlm_init_lockres(dlm, res, name, namelen); 620 return res; 621 622 error: 623 if (res && res->lockname.name) 624 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); 625 626 if (res) 627 kmem_cache_free(dlm_lockres_cache, res); 628 return NULL; 629 } 630 631 void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, 632 struct dlm_lock_resource *res, int bit) 633 { 634 assert_spin_locked(&res->spinlock); 635 636 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, 637 res->lockname.name, bit, __builtin_return_address(0)); 638 639 set_bit(bit, res->refmap); 640 } 641 642 void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, 643 struct dlm_lock_resource *res, int bit) 644 { 645 assert_spin_locked(&res->spinlock); 646 647 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, 648 res->lockname.name, bit, __builtin_return_address(0)); 649 650 clear_bit(bit, res->refmap); 651 } 652 653 654 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 655 struct dlm_lock_resource *res) 656 { 657 assert_spin_locked(&res->spinlock); 658 659 res->inflight_locks++; 660 661 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, 662 res->lockname.len, res->lockname.name, res->inflight_locks, 663 __builtin_return_address(0)); 664 } 665 666 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 667 struct dlm_lock_resource *res) 668 { 669 assert_spin_locked(&res->spinlock); 670 671 BUG_ON(res->inflight_locks == 0); 672 673 res->inflight_locks--; 674 675 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, 676 res->lockname.len, res->lockname.name, res->inflight_locks, 677 __builtin_return_address(0)); 678 679 wake_up(&res->wq); 680 } 681 682 /* 683 * lookup a lock resource by name. 684 * may already exist in the hashtable. 685 * lockid is null terminated 686 * 687 * if not, allocate enough for the lockres and for 688 * the temporary structure used in doing the mastering. 689 * 690 * also, do a lookup in the dlm->master_list to see 691 * if another node has begun mastering the same lock. 692 * if so, there should be a block entry in there 693 * for this name, and we should *not* attempt to master 694 * the lock here. need to wait around for that node 695 * to assert_master (or die). 696 * 697 */ 698 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, 699 const char *lockid, 700 int namelen, 701 int flags) 702 { 703 struct dlm_lock_resource *tmpres=NULL, *res=NULL; 704 struct dlm_master_list_entry *mle = NULL; 705 struct dlm_master_list_entry *alloc_mle = NULL; 706 int blocked = 0; 707 int ret, nodenum; 708 struct dlm_node_iter iter; 709 unsigned int hash; 710 int tries = 0; 711 int bit, wait_on_recovery = 0; 712 713 BUG_ON(!lockid); 714 715 hash = dlm_lockid_hash(lockid, namelen); 716 717 mlog(0, "get lockres %s (len %d)\n", lockid, namelen); 718 719 lookup: 720 spin_lock(&dlm->spinlock); 721 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 722 if (tmpres) { 723 spin_unlock(&dlm->spinlock); 724 spin_lock(&tmpres->spinlock); 725 /* Wait on the thread that is mastering the resource */ 726 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 727 __dlm_wait_on_lockres(tmpres); 728 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); 729 spin_unlock(&tmpres->spinlock); 730 dlm_lockres_put(tmpres); 731 tmpres = NULL; 732 goto lookup; 733 } 734 735 /* Wait on the resource purge to complete before continuing */ 736 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { 737 BUG_ON(tmpres->owner == dlm->node_num); 738 __dlm_wait_on_lockres_flags(tmpres, 739 DLM_LOCK_RES_DROPPING_REF); 740 spin_unlock(&tmpres->spinlock); 741 dlm_lockres_put(tmpres); 742 tmpres = NULL; 743 goto lookup; 744 } 745 746 /* Grab inflight ref to pin the resource */ 747 dlm_lockres_grab_inflight_ref(dlm, tmpres); 748 749 spin_unlock(&tmpres->spinlock); 750 if (res) 751 dlm_lockres_put(res); 752 res = tmpres; 753 goto leave; 754 } 755 756 if (!res) { 757 spin_unlock(&dlm->spinlock); 758 mlog(0, "allocating a new resource\n"); 759 /* nothing found and we need to allocate one. */ 760 alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 761 if (!alloc_mle) 762 goto leave; 763 res = dlm_new_lockres(dlm, lockid, namelen); 764 if (!res) 765 goto leave; 766 goto lookup; 767 } 768 769 mlog(0, "no lockres found, allocated our own: %p\n", res); 770 771 if (flags & LKM_LOCAL) { 772 /* caller knows it's safe to assume it's not mastered elsewhere 773 * DONE! return right away */ 774 spin_lock(&res->spinlock); 775 dlm_change_lockres_owner(dlm, res, dlm->node_num); 776 __dlm_insert_lockres(dlm, res); 777 dlm_lockres_grab_inflight_ref(dlm, res); 778 spin_unlock(&res->spinlock); 779 spin_unlock(&dlm->spinlock); 780 /* lockres still marked IN_PROGRESS */ 781 goto wake_waiters; 782 } 783 784 /* check master list to see if another node has started mastering it */ 785 spin_lock(&dlm->master_lock); 786 787 /* if we found a block, wait for lock to be mastered by another node */ 788 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); 789 if (blocked) { 790 int mig; 791 if (mle->type == DLM_MLE_MASTER) { 792 mlog(ML_ERROR, "master entry for nonexistent lock!\n"); 793 BUG(); 794 } 795 mig = (mle->type == DLM_MLE_MIGRATION); 796 /* if there is a migration in progress, let the migration 797 * finish before continuing. we can wait for the absence 798 * of the MIGRATION mle: either the migrate finished or 799 * one of the nodes died and the mle was cleaned up. 800 * if there is a BLOCK here, but it already has a master 801 * set, we are too late. the master does not have a ref 802 * for us in the refmap. detach the mle and drop it. 803 * either way, go back to the top and start over. */ 804 if (mig || mle->master != O2NM_MAX_NODES) { 805 BUG_ON(mig && mle->master == dlm->node_num); 806 /* we arrived too late. the master does not 807 * have a ref for us. retry. */ 808 mlog(0, "%s:%.*s: late on %s\n", 809 dlm->name, namelen, lockid, 810 mig ? "MIGRATION" : "BLOCK"); 811 spin_unlock(&dlm->master_lock); 812 spin_unlock(&dlm->spinlock); 813 814 /* master is known, detach */ 815 if (!mig) 816 dlm_mle_detach_hb_events(dlm, mle); 817 dlm_put_mle(mle); 818 mle = NULL; 819 /* this is lame, but we can't wait on either 820 * the mle or lockres waitqueue here */ 821 if (mig) 822 msleep(100); 823 goto lookup; 824 } 825 } else { 826 /* go ahead and try to master lock on this node */ 827 mle = alloc_mle; 828 /* make sure this does not get freed below */ 829 alloc_mle = NULL; 830 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); 831 set_bit(dlm->node_num, mle->maybe_map); 832 __dlm_insert_mle(dlm, mle); 833 834 /* still holding the dlm spinlock, check the recovery map 835 * to see if there are any nodes that still need to be 836 * considered. these will not appear in the mle nodemap 837 * but they might own this lockres. wait on them. */ 838 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 839 if (bit < O2NM_MAX_NODES) { 840 mlog(0, "%s: res %.*s, At least one node (%d) " 841 "to recover before lock mastery can begin\n", 842 dlm->name, namelen, (char *)lockid, bit); 843 wait_on_recovery = 1; 844 } 845 } 846 847 /* at this point there is either a DLM_MLE_BLOCK or a 848 * DLM_MLE_MASTER on the master list, so it's safe to add the 849 * lockres to the hashtable. anyone who finds the lock will 850 * still have to wait on the IN_PROGRESS. */ 851 852 /* finally add the lockres to its hash bucket */ 853 __dlm_insert_lockres(dlm, res); 854 855 /* Grab inflight ref to pin the resource */ 856 spin_lock(&res->spinlock); 857 dlm_lockres_grab_inflight_ref(dlm, res); 858 spin_unlock(&res->spinlock); 859 860 /* get an extra ref on the mle in case this is a BLOCK 861 * if so, the creator of the BLOCK may try to put the last 862 * ref at this time in the assert master handler, so we 863 * need an extra one to keep from a bad ptr deref. */ 864 dlm_get_mle_inuse(mle); 865 spin_unlock(&dlm->master_lock); 866 spin_unlock(&dlm->spinlock); 867 868 redo_request: 869 while (wait_on_recovery) { 870 /* any cluster changes that occurred after dropping the 871 * dlm spinlock would be detectable be a change on the mle, 872 * so we only need to clear out the recovery map once. */ 873 if (dlm_is_recovery_lock(lockid, namelen)) { 874 mlog(0, "%s: Recovery map is not empty, but must " 875 "master $RECOVERY lock now\n", dlm->name); 876 if (!dlm_pre_master_reco_lockres(dlm, res)) 877 wait_on_recovery = 0; 878 else { 879 mlog(0, "%s: waiting 500ms for heartbeat state " 880 "change\n", dlm->name); 881 msleep(500); 882 } 883 continue; 884 } 885 886 dlm_kick_recovery_thread(dlm); 887 msleep(1000); 888 dlm_wait_for_recovery(dlm); 889 890 spin_lock(&dlm->spinlock); 891 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 892 if (bit < O2NM_MAX_NODES) { 893 mlog(0, "%s: res %.*s, At least one node (%d) " 894 "to recover before lock mastery can begin\n", 895 dlm->name, namelen, (char *)lockid, bit); 896 wait_on_recovery = 1; 897 } else 898 wait_on_recovery = 0; 899 spin_unlock(&dlm->spinlock); 900 901 if (wait_on_recovery) 902 dlm_wait_for_node_recovery(dlm, bit, 10000); 903 } 904 905 /* must wait for lock to be mastered elsewhere */ 906 if (blocked) 907 goto wait; 908 909 ret = -EINVAL; 910 dlm_node_iter_init(mle->vote_map, &iter); 911 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 912 ret = dlm_do_master_request(res, mle, nodenum); 913 if (ret < 0) 914 mlog_errno(ret); 915 if (mle->master != O2NM_MAX_NODES) { 916 /* found a master ! */ 917 if (mle->master <= nodenum) 918 break; 919 /* if our master request has not reached the master 920 * yet, keep going until it does. this is how the 921 * master will know that asserts are needed back to 922 * the lower nodes. */ 923 mlog(0, "%s: res %.*s, Requests only up to %u but " 924 "master is %u, keep going\n", dlm->name, namelen, 925 lockid, nodenum, mle->master); 926 } 927 } 928 929 wait: 930 /* keep going until the response map includes all nodes */ 931 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 932 if (ret < 0) { 933 wait_on_recovery = 1; 934 mlog(0, "%s: res %.*s, Node map changed, redo the master " 935 "request now, blocked=%d\n", dlm->name, res->lockname.len, 936 res->lockname.name, blocked); 937 if (++tries > 20) { 938 mlog(ML_ERROR, "%s: res %.*s, Spinning on " 939 "dlm_wait_for_lock_mastery, blocked = %d\n", 940 dlm->name, res->lockname.len, 941 res->lockname.name, blocked); 942 dlm_print_one_lock_resource(res); 943 dlm_print_one_mle(mle); 944 tries = 0; 945 } 946 goto redo_request; 947 } 948 949 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, 950 res->lockname.name, res->owner); 951 /* make sure we never continue without this */ 952 BUG_ON(res->owner == O2NM_MAX_NODES); 953 954 /* master is known, detach if not already detached */ 955 dlm_mle_detach_hb_events(dlm, mle); 956 dlm_put_mle(mle); 957 /* put the extra ref */ 958 dlm_put_mle_inuse(mle); 959 960 wake_waiters: 961 spin_lock(&res->spinlock); 962 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 963 spin_unlock(&res->spinlock); 964 wake_up(&res->wq); 965 966 leave: 967 /* need to free the unused mle */ 968 if (alloc_mle) 969 kmem_cache_free(dlm_mle_cache, alloc_mle); 970 971 return res; 972 } 973 974 975 #define DLM_MASTERY_TIMEOUT_MS 5000 976 977 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 978 struct dlm_lock_resource *res, 979 struct dlm_master_list_entry *mle, 980 int *blocked) 981 { 982 u8 m; 983 int ret, bit; 984 int map_changed, voting_done; 985 int assert, sleep; 986 987 recheck: 988 ret = 0; 989 assert = 0; 990 991 /* check if another node has already become the owner */ 992 spin_lock(&res->spinlock); 993 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 994 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, 995 res->lockname.len, res->lockname.name, res->owner); 996 spin_unlock(&res->spinlock); 997 /* this will cause the master to re-assert across 998 * the whole cluster, freeing up mles */ 999 if (res->owner != dlm->node_num) { 1000 ret = dlm_do_master_request(res, mle, res->owner); 1001 if (ret < 0) { 1002 /* give recovery a chance to run */ 1003 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1004 msleep(500); 1005 goto recheck; 1006 } 1007 } 1008 ret = 0; 1009 goto leave; 1010 } 1011 spin_unlock(&res->spinlock); 1012 1013 spin_lock(&mle->spinlock); 1014 m = mle->master; 1015 map_changed = (memcmp(mle->vote_map, mle->node_map, 1016 sizeof(mle->vote_map)) != 0); 1017 voting_done = (memcmp(mle->vote_map, mle->response_map, 1018 sizeof(mle->vote_map)) == 0); 1019 1020 /* restart if we hit any errors */ 1021 if (map_changed) { 1022 int b; 1023 mlog(0, "%s: %.*s: node map changed, restarting\n", 1024 dlm->name, res->lockname.len, res->lockname.name); 1025 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1026 b = (mle->type == DLM_MLE_BLOCK); 1027 if ((*blocked && !b) || (!*blocked && b)) { 1028 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1029 dlm->name, res->lockname.len, res->lockname.name, 1030 *blocked, b); 1031 *blocked = b; 1032 } 1033 spin_unlock(&mle->spinlock); 1034 if (ret < 0) { 1035 mlog_errno(ret); 1036 goto leave; 1037 } 1038 mlog(0, "%s:%.*s: restart lock mastery succeeded, " 1039 "rechecking now\n", dlm->name, res->lockname.len, 1040 res->lockname.name); 1041 goto recheck; 1042 } else { 1043 if (!voting_done) { 1044 mlog(0, "map not changed and voting not done " 1045 "for %s:%.*s\n", dlm->name, res->lockname.len, 1046 res->lockname.name); 1047 } 1048 } 1049 1050 if (m != O2NM_MAX_NODES) { 1051 /* another node has done an assert! 1052 * all done! */ 1053 sleep = 0; 1054 } else { 1055 sleep = 1; 1056 /* have all nodes responded? */ 1057 if (voting_done && !*blocked) { 1058 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 1059 if (dlm->node_num <= bit) { 1060 /* my node number is lowest. 1061 * now tell other nodes that I am 1062 * mastering this. */ 1063 mle->master = dlm->node_num; 1064 /* ref was grabbed in get_lock_resource 1065 * will be dropped in dlmlock_master */ 1066 assert = 1; 1067 sleep = 0; 1068 } 1069 /* if voting is done, but we have not received 1070 * an assert master yet, we must sleep */ 1071 } 1072 } 1073 1074 spin_unlock(&mle->spinlock); 1075 1076 /* sleep if we haven't finished voting yet */ 1077 if (sleep) { 1078 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); 1079 1080 /* 1081 if (atomic_read(&mle->mle_refs.refcount) < 2) 1082 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, 1083 atomic_read(&mle->mle_refs.refcount), 1084 res->lockname.len, res->lockname.name); 1085 */ 1086 atomic_set(&mle->woken, 0); 1087 (void)wait_event_timeout(mle->wq, 1088 (atomic_read(&mle->woken) == 1), 1089 timeo); 1090 if (res->owner == O2NM_MAX_NODES) { 1091 mlog(0, "%s:%.*s: waiting again\n", dlm->name, 1092 res->lockname.len, res->lockname.name); 1093 goto recheck; 1094 } 1095 mlog(0, "done waiting, master is %u\n", res->owner); 1096 ret = 0; 1097 goto leave; 1098 } 1099 1100 ret = 0; /* done */ 1101 if (assert) { 1102 m = dlm->node_num; 1103 mlog(0, "about to master %.*s here, this=%u\n", 1104 res->lockname.len, res->lockname.name, m); 1105 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); 1106 if (ret) { 1107 /* This is a failure in the network path, 1108 * not in the response to the assert_master 1109 * (any nonzero response is a BUG on this node). 1110 * Most likely a socket just got disconnected 1111 * due to node death. */ 1112 mlog_errno(ret); 1113 } 1114 /* no longer need to restart lock mastery. 1115 * all living nodes have been contacted. */ 1116 ret = 0; 1117 } 1118 1119 /* set the lockres owner */ 1120 spin_lock(&res->spinlock); 1121 /* mastery reference obtained either during 1122 * assert_master_handler or in get_lock_resource */ 1123 dlm_change_lockres_owner(dlm, res, m); 1124 spin_unlock(&res->spinlock); 1125 1126 leave: 1127 return ret; 1128 } 1129 1130 struct dlm_bitmap_diff_iter 1131 { 1132 int curnode; 1133 unsigned long *orig_bm; 1134 unsigned long *cur_bm; 1135 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1136 }; 1137 1138 enum dlm_node_state_change 1139 { 1140 NODE_DOWN = -1, 1141 NODE_NO_CHANGE = 0, 1142 NODE_UP 1143 }; 1144 1145 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, 1146 unsigned long *orig_bm, 1147 unsigned long *cur_bm) 1148 { 1149 unsigned long p1, p2; 1150 int i; 1151 1152 iter->curnode = -1; 1153 iter->orig_bm = orig_bm; 1154 iter->cur_bm = cur_bm; 1155 1156 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { 1157 p1 = *(iter->orig_bm + i); 1158 p2 = *(iter->cur_bm + i); 1159 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); 1160 } 1161 } 1162 1163 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, 1164 enum dlm_node_state_change *state) 1165 { 1166 int bit; 1167 1168 if (iter->curnode >= O2NM_MAX_NODES) 1169 return -ENOENT; 1170 1171 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, 1172 iter->curnode+1); 1173 if (bit >= O2NM_MAX_NODES) { 1174 iter->curnode = O2NM_MAX_NODES; 1175 return -ENOENT; 1176 } 1177 1178 /* if it was there in the original then this node died */ 1179 if (test_bit(bit, iter->orig_bm)) 1180 *state = NODE_DOWN; 1181 else 1182 *state = NODE_UP; 1183 1184 iter->curnode = bit; 1185 return bit; 1186 } 1187 1188 1189 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 1190 struct dlm_lock_resource *res, 1191 struct dlm_master_list_entry *mle, 1192 int blocked) 1193 { 1194 struct dlm_bitmap_diff_iter bdi; 1195 enum dlm_node_state_change sc; 1196 int node; 1197 int ret = 0; 1198 1199 mlog(0, "something happened such that the " 1200 "master process may need to be restarted!\n"); 1201 1202 assert_spin_locked(&mle->spinlock); 1203 1204 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); 1205 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1206 while (node >= 0) { 1207 if (sc == NODE_UP) { 1208 /* a node came up. clear any old vote from 1209 * the response map and set it in the vote map 1210 * then restart the mastery. */ 1211 mlog(ML_NOTICE, "node %d up while restarting\n", node); 1212 1213 /* redo the master request, but only for the new node */ 1214 mlog(0, "sending request to new node\n"); 1215 clear_bit(node, mle->response_map); 1216 set_bit(node, mle->vote_map); 1217 } else { 1218 mlog(ML_ERROR, "node down! %d\n", node); 1219 if (blocked) { 1220 int lowest = find_next_bit(mle->maybe_map, 1221 O2NM_MAX_NODES, 0); 1222 1223 /* act like it was never there */ 1224 clear_bit(node, mle->maybe_map); 1225 1226 if (node == lowest) { 1227 mlog(0, "expected master %u died" 1228 " while this node was blocked " 1229 "waiting on it!\n", node); 1230 lowest = find_next_bit(mle->maybe_map, 1231 O2NM_MAX_NODES, 1232 lowest+1); 1233 if (lowest < O2NM_MAX_NODES) { 1234 mlog(0, "%s:%.*s:still " 1235 "blocked. waiting on %u " 1236 "now\n", dlm->name, 1237 res->lockname.len, 1238 res->lockname.name, 1239 lowest); 1240 } else { 1241 /* mle is an MLE_BLOCK, but 1242 * there is now nothing left to 1243 * block on. we need to return 1244 * all the way back out and try 1245 * again with an MLE_MASTER. 1246 * dlm_do_local_recovery_cleanup 1247 * has already run, so the mle 1248 * refcount is ok */ 1249 mlog(0, "%s:%.*s: no " 1250 "longer blocking. try to " 1251 "master this here\n", 1252 dlm->name, 1253 res->lockname.len, 1254 res->lockname.name); 1255 mle->type = DLM_MLE_MASTER; 1256 mle->mleres = res; 1257 } 1258 } 1259 } 1260 1261 /* now blank out everything, as if we had never 1262 * contacted anyone */ 1263 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 1264 memset(mle->response_map, 0, sizeof(mle->response_map)); 1265 /* reset the vote_map to the current node_map */ 1266 memcpy(mle->vote_map, mle->node_map, 1267 sizeof(mle->node_map)); 1268 /* put myself into the maybe map */ 1269 if (mle->type != DLM_MLE_BLOCK) 1270 set_bit(dlm->node_num, mle->maybe_map); 1271 } 1272 ret = -EAGAIN; 1273 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1274 } 1275 return ret; 1276 } 1277 1278 1279 /* 1280 * DLM_MASTER_REQUEST_MSG 1281 * 1282 * returns: 0 on success, 1283 * -errno on a network error 1284 * 1285 * on error, the caller should assume the target node is "dead" 1286 * 1287 */ 1288 1289 static int dlm_do_master_request(struct dlm_lock_resource *res, 1290 struct dlm_master_list_entry *mle, int to) 1291 { 1292 struct dlm_ctxt *dlm = mle->dlm; 1293 struct dlm_master_request request; 1294 int ret, response=0, resend; 1295 1296 memset(&request, 0, sizeof(request)); 1297 request.node_idx = dlm->node_num; 1298 1299 BUG_ON(mle->type == DLM_MLE_MIGRATION); 1300 1301 request.namelen = (u8)mle->mnamelen; 1302 memcpy(request.name, mle->mname, request.namelen); 1303 1304 again: 1305 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, 1306 sizeof(request), to, &response); 1307 if (ret < 0) { 1308 if (ret == -ESRCH) { 1309 /* should never happen */ 1310 mlog(ML_ERROR, "TCP stack not ready!\n"); 1311 BUG(); 1312 } else if (ret == -EINVAL) { 1313 mlog(ML_ERROR, "bad args passed to o2net!\n"); 1314 BUG(); 1315 } else if (ret == -ENOMEM) { 1316 mlog(ML_ERROR, "out of memory while trying to send " 1317 "network message! retrying\n"); 1318 /* this is totally crude */ 1319 msleep(50); 1320 goto again; 1321 } else if (!dlm_is_host_down(ret)) { 1322 /* not a network error. bad. */ 1323 mlog_errno(ret); 1324 mlog(ML_ERROR, "unhandled error!"); 1325 BUG(); 1326 } 1327 /* all other errors should be network errors, 1328 * and likely indicate node death */ 1329 mlog(ML_ERROR, "link to %d went down!\n", to); 1330 goto out; 1331 } 1332 1333 ret = 0; 1334 resend = 0; 1335 spin_lock(&mle->spinlock); 1336 switch (response) { 1337 case DLM_MASTER_RESP_YES: 1338 set_bit(to, mle->response_map); 1339 mlog(0, "node %u is the master, response=YES\n", to); 1340 mlog(0, "%s:%.*s: master node %u now knows I have a " 1341 "reference\n", dlm->name, res->lockname.len, 1342 res->lockname.name, to); 1343 mle->master = to; 1344 break; 1345 case DLM_MASTER_RESP_NO: 1346 mlog(0, "node %u not master, response=NO\n", to); 1347 set_bit(to, mle->response_map); 1348 break; 1349 case DLM_MASTER_RESP_MAYBE: 1350 mlog(0, "node %u not master, response=MAYBE\n", to); 1351 set_bit(to, mle->response_map); 1352 set_bit(to, mle->maybe_map); 1353 break; 1354 case DLM_MASTER_RESP_ERROR: 1355 mlog(0, "node %u hit an error, resending\n", to); 1356 resend = 1; 1357 response = 0; 1358 break; 1359 default: 1360 mlog(ML_ERROR, "bad response! %u\n", response); 1361 BUG(); 1362 } 1363 spin_unlock(&mle->spinlock); 1364 if (resend) { 1365 /* this is also totally crude */ 1366 msleep(50); 1367 goto again; 1368 } 1369 1370 out: 1371 return ret; 1372 } 1373 1374 /* 1375 * locks that can be taken here: 1376 * dlm->spinlock 1377 * res->spinlock 1378 * mle->spinlock 1379 * dlm->master_list 1380 * 1381 * if possible, TRIM THIS DOWN!!! 1382 */ 1383 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, 1384 void **ret_data) 1385 { 1386 u8 response = DLM_MASTER_RESP_MAYBE; 1387 struct dlm_ctxt *dlm = data; 1388 struct dlm_lock_resource *res = NULL; 1389 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1390 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1391 char *name; 1392 unsigned int namelen, hash; 1393 int found, ret; 1394 int set_maybe; 1395 int dispatch_assert = 0; 1396 1397 if (!dlm_grab(dlm)) 1398 return DLM_MASTER_RESP_NO; 1399 1400 if (!dlm_domain_fully_joined(dlm)) { 1401 response = DLM_MASTER_RESP_NO; 1402 goto send_response; 1403 } 1404 1405 name = request->name; 1406 namelen = request->namelen; 1407 hash = dlm_lockid_hash(name, namelen); 1408 1409 if (namelen > DLM_LOCKID_NAME_MAX) { 1410 response = DLM_IVBUFLEN; 1411 goto send_response; 1412 } 1413 1414 way_up_top: 1415 spin_lock(&dlm->spinlock); 1416 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1417 if (res) { 1418 spin_unlock(&dlm->spinlock); 1419 1420 /* take care of the easy cases up front */ 1421 spin_lock(&res->spinlock); 1422 if (res->state & (DLM_LOCK_RES_RECOVERING| 1423 DLM_LOCK_RES_MIGRATING)) { 1424 spin_unlock(&res->spinlock); 1425 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1426 "being recovered/migrated\n"); 1427 response = DLM_MASTER_RESP_ERROR; 1428 if (mle) 1429 kmem_cache_free(dlm_mle_cache, mle); 1430 goto send_response; 1431 } 1432 1433 if (res->owner == dlm->node_num) { 1434 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); 1435 spin_unlock(&res->spinlock); 1436 response = DLM_MASTER_RESP_YES; 1437 if (mle) 1438 kmem_cache_free(dlm_mle_cache, mle); 1439 1440 /* this node is the owner. 1441 * there is some extra work that needs to 1442 * happen now. the requesting node has 1443 * caused all nodes up to this one to 1444 * create mles. this node now needs to 1445 * go back and clean those up. */ 1446 dispatch_assert = 1; 1447 goto send_response; 1448 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1449 spin_unlock(&res->spinlock); 1450 // mlog(0, "node %u is the master\n", res->owner); 1451 response = DLM_MASTER_RESP_NO; 1452 if (mle) 1453 kmem_cache_free(dlm_mle_cache, mle); 1454 goto send_response; 1455 } 1456 1457 /* ok, there is no owner. either this node is 1458 * being blocked, or it is actively trying to 1459 * master this lock. */ 1460 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1461 mlog(ML_ERROR, "lock with no owner should be " 1462 "in-progress!\n"); 1463 BUG(); 1464 } 1465 1466 // mlog(0, "lockres is in progress...\n"); 1467 spin_lock(&dlm->master_lock); 1468 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1469 if (!found) { 1470 mlog(ML_ERROR, "no mle found for this lock!\n"); 1471 BUG(); 1472 } 1473 set_maybe = 1; 1474 spin_lock(&tmpmle->spinlock); 1475 if (tmpmle->type == DLM_MLE_BLOCK) { 1476 // mlog(0, "this node is waiting for " 1477 // "lockres to be mastered\n"); 1478 response = DLM_MASTER_RESP_NO; 1479 } else if (tmpmle->type == DLM_MLE_MIGRATION) { 1480 mlog(0, "node %u is master, but trying to migrate to " 1481 "node %u.\n", tmpmle->master, tmpmle->new_master); 1482 if (tmpmle->master == dlm->node_num) { 1483 mlog(ML_ERROR, "no owner on lockres, but this " 1484 "node is trying to migrate it to %u?!\n", 1485 tmpmle->new_master); 1486 BUG(); 1487 } else { 1488 /* the real master can respond on its own */ 1489 response = DLM_MASTER_RESP_NO; 1490 } 1491 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1492 set_maybe = 0; 1493 if (tmpmle->master == dlm->node_num) { 1494 response = DLM_MASTER_RESP_YES; 1495 /* this node will be the owner. 1496 * go back and clean the mles on any 1497 * other nodes */ 1498 dispatch_assert = 1; 1499 dlm_lockres_set_refmap_bit(dlm, res, 1500 request->node_idx); 1501 } else 1502 response = DLM_MASTER_RESP_NO; 1503 } else { 1504 // mlog(0, "this node is attempting to " 1505 // "master lockres\n"); 1506 response = DLM_MASTER_RESP_MAYBE; 1507 } 1508 if (set_maybe) 1509 set_bit(request->node_idx, tmpmle->maybe_map); 1510 spin_unlock(&tmpmle->spinlock); 1511 1512 spin_unlock(&dlm->master_lock); 1513 spin_unlock(&res->spinlock); 1514 1515 /* keep the mle attached to heartbeat events */ 1516 dlm_put_mle(tmpmle); 1517 if (mle) 1518 kmem_cache_free(dlm_mle_cache, mle); 1519 goto send_response; 1520 } 1521 1522 /* 1523 * lockres doesn't exist on this node 1524 * if there is an MLE_BLOCK, return NO 1525 * if there is an MLE_MASTER, return MAYBE 1526 * otherwise, add an MLE_BLOCK, return NO 1527 */ 1528 spin_lock(&dlm->master_lock); 1529 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1530 if (!found) { 1531 /* this lockid has never been seen on this node yet */ 1532 // mlog(0, "no mle found\n"); 1533 if (!mle) { 1534 spin_unlock(&dlm->master_lock); 1535 spin_unlock(&dlm->spinlock); 1536 1537 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 1538 if (!mle) { 1539 response = DLM_MASTER_RESP_ERROR; 1540 mlog_errno(-ENOMEM); 1541 goto send_response; 1542 } 1543 goto way_up_top; 1544 } 1545 1546 // mlog(0, "this is second time thru, already allocated, " 1547 // "add the block.\n"); 1548 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); 1549 set_bit(request->node_idx, mle->maybe_map); 1550 __dlm_insert_mle(dlm, mle); 1551 response = DLM_MASTER_RESP_NO; 1552 } else { 1553 // mlog(0, "mle was found\n"); 1554 set_maybe = 1; 1555 spin_lock(&tmpmle->spinlock); 1556 if (tmpmle->master == dlm->node_num) { 1557 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); 1558 BUG(); 1559 } 1560 if (tmpmle->type == DLM_MLE_BLOCK) 1561 response = DLM_MASTER_RESP_NO; 1562 else if (tmpmle->type == DLM_MLE_MIGRATION) { 1563 mlog(0, "migration mle was found (%u->%u)\n", 1564 tmpmle->master, tmpmle->new_master); 1565 /* real master can respond on its own */ 1566 response = DLM_MASTER_RESP_NO; 1567 } else 1568 response = DLM_MASTER_RESP_MAYBE; 1569 if (set_maybe) 1570 set_bit(request->node_idx, tmpmle->maybe_map); 1571 spin_unlock(&tmpmle->spinlock); 1572 } 1573 spin_unlock(&dlm->master_lock); 1574 spin_unlock(&dlm->spinlock); 1575 1576 if (found) { 1577 /* keep the mle attached to heartbeat events */ 1578 dlm_put_mle(tmpmle); 1579 } 1580 send_response: 1581 /* 1582 * __dlm_lookup_lockres() grabbed a reference to this lockres. 1583 * The reference is released by dlm_assert_master_worker() under 1584 * the call to dlm_dispatch_assert_master(). If 1585 * dlm_assert_master_worker() isn't called, we drop it here. 1586 */ 1587 if (dispatch_assert) { 1588 if (response != DLM_MASTER_RESP_YES) 1589 mlog(ML_ERROR, "invalid response %d\n", response); 1590 if (!res) { 1591 mlog(ML_ERROR, "bad lockres while trying to assert!\n"); 1592 BUG(); 1593 } 1594 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1595 dlm->node_num, res->lockname.len, res->lockname.name); 1596 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1597 DLM_ASSERT_MASTER_MLE_CLEANUP); 1598 if (ret < 0) { 1599 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1600 response = DLM_MASTER_RESP_ERROR; 1601 dlm_lockres_put(res); 1602 } 1603 } else { 1604 if (res) 1605 dlm_lockres_put(res); 1606 } 1607 1608 dlm_put(dlm); 1609 return response; 1610 } 1611 1612 /* 1613 * DLM_ASSERT_MASTER_MSG 1614 */ 1615 1616 1617 /* 1618 * NOTE: this can be used for debugging 1619 * can periodically run all locks owned by this node 1620 * and re-assert across the cluster... 1621 */ 1622 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 1623 struct dlm_lock_resource *res, 1624 void *nodemap, u32 flags) 1625 { 1626 struct dlm_assert_master assert; 1627 int to, tmpret; 1628 struct dlm_node_iter iter; 1629 int ret = 0; 1630 int reassert; 1631 const char *lockname = res->lockname.name; 1632 unsigned int namelen = res->lockname.len; 1633 1634 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 1635 1636 spin_lock(&res->spinlock); 1637 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1638 spin_unlock(&res->spinlock); 1639 1640 again: 1641 reassert = 0; 1642 1643 /* note that if this nodemap is empty, it returns 0 */ 1644 dlm_node_iter_init(nodemap, &iter); 1645 while ((to = dlm_node_iter_next(&iter)) >= 0) { 1646 int r = 0; 1647 struct dlm_master_list_entry *mle = NULL; 1648 1649 mlog(0, "sending assert master to %d (%.*s)\n", to, 1650 namelen, lockname); 1651 memset(&assert, 0, sizeof(assert)); 1652 assert.node_idx = dlm->node_num; 1653 assert.namelen = namelen; 1654 memcpy(assert.name, lockname, namelen); 1655 assert.flags = cpu_to_be32(flags); 1656 1657 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 1658 &assert, sizeof(assert), to, &r); 1659 if (tmpret < 0) { 1660 mlog(ML_ERROR, "Error %d when sending message %u (key " 1661 "0x%x) to node %u\n", tmpret, 1662 DLM_ASSERT_MASTER_MSG, dlm->key, to); 1663 if (!dlm_is_host_down(tmpret)) { 1664 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); 1665 BUG(); 1666 } 1667 /* a node died. finish out the rest of the nodes. */ 1668 mlog(0, "link to %d went down!\n", to); 1669 /* any nonzero status return will do */ 1670 ret = tmpret; 1671 r = 0; 1672 } else if (r < 0) { 1673 /* ok, something horribly messed. kill thyself. */ 1674 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1675 "got %d.\n", namelen, lockname, to, r); 1676 spin_lock(&dlm->spinlock); 1677 spin_lock(&dlm->master_lock); 1678 if (dlm_find_mle(dlm, &mle, (char *)lockname, 1679 namelen)) { 1680 dlm_print_one_mle(mle); 1681 __dlm_put_mle(mle); 1682 } 1683 spin_unlock(&dlm->master_lock); 1684 spin_unlock(&dlm->spinlock); 1685 BUG(); 1686 } 1687 1688 if (r & DLM_ASSERT_RESPONSE_REASSERT && 1689 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { 1690 mlog(ML_ERROR, "%.*s: very strange, " 1691 "master MLE but no lockres on %u\n", 1692 namelen, lockname, to); 1693 } 1694 1695 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1696 mlog(0, "%.*s: node %u create mles on other " 1697 "nodes and requests a re-assert\n", 1698 namelen, lockname, to); 1699 reassert = 1; 1700 } 1701 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { 1702 mlog(0, "%.*s: node %u has a reference to this " 1703 "lockres, set the bit in the refmap\n", 1704 namelen, lockname, to); 1705 spin_lock(&res->spinlock); 1706 dlm_lockres_set_refmap_bit(dlm, res, to); 1707 spin_unlock(&res->spinlock); 1708 } 1709 } 1710 1711 if (reassert) 1712 goto again; 1713 1714 spin_lock(&res->spinlock); 1715 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 1716 spin_unlock(&res->spinlock); 1717 wake_up(&res->wq); 1718 1719 return ret; 1720 } 1721 1722 /* 1723 * locks that can be taken here: 1724 * dlm->spinlock 1725 * res->spinlock 1726 * mle->spinlock 1727 * dlm->master_list 1728 * 1729 * if possible, TRIM THIS DOWN!!! 1730 */ 1731 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, 1732 void **ret_data) 1733 { 1734 struct dlm_ctxt *dlm = data; 1735 struct dlm_master_list_entry *mle = NULL; 1736 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; 1737 struct dlm_lock_resource *res = NULL; 1738 char *name; 1739 unsigned int namelen, hash; 1740 u32 flags; 1741 int master_request = 0, have_lockres_ref = 0; 1742 int ret = 0; 1743 1744 if (!dlm_grab(dlm)) 1745 return 0; 1746 1747 name = assert->name; 1748 namelen = assert->namelen; 1749 hash = dlm_lockid_hash(name, namelen); 1750 flags = be32_to_cpu(assert->flags); 1751 1752 if (namelen > DLM_LOCKID_NAME_MAX) { 1753 mlog(ML_ERROR, "Invalid name length!"); 1754 goto done; 1755 } 1756 1757 spin_lock(&dlm->spinlock); 1758 1759 if (flags) 1760 mlog(0, "assert_master with flags: %u\n", flags); 1761 1762 /* find the MLE */ 1763 spin_lock(&dlm->master_lock); 1764 if (!dlm_find_mle(dlm, &mle, name, namelen)) { 1765 /* not an error, could be master just re-asserting */ 1766 mlog(0, "just got an assert_master from %u, but no " 1767 "MLE for it! (%.*s)\n", assert->node_idx, 1768 namelen, name); 1769 } else { 1770 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); 1771 if (bit >= O2NM_MAX_NODES) { 1772 /* not necessarily an error, though less likely. 1773 * could be master just re-asserting. */ 1774 mlog(0, "no bits set in the maybe_map, but %u " 1775 "is asserting! (%.*s)\n", assert->node_idx, 1776 namelen, name); 1777 } else if (bit != assert->node_idx) { 1778 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1779 mlog(0, "master %u was found, %u should " 1780 "back off\n", assert->node_idx, bit); 1781 } else { 1782 /* with the fix for bug 569, a higher node 1783 * number winning the mastery will respond 1784 * YES to mastery requests, but this node 1785 * had no way of knowing. let it pass. */ 1786 mlog(0, "%u is the lowest node, " 1787 "%u is asserting. (%.*s) %u must " 1788 "have begun after %u won.\n", bit, 1789 assert->node_idx, namelen, name, bit, 1790 assert->node_idx); 1791 } 1792 } 1793 if (mle->type == DLM_MLE_MIGRATION) { 1794 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1795 mlog(0, "%s:%.*s: got cleanup assert" 1796 " from %u for migration\n", 1797 dlm->name, namelen, name, 1798 assert->node_idx); 1799 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { 1800 mlog(0, "%s:%.*s: got unrelated assert" 1801 " from %u for migration, ignoring\n", 1802 dlm->name, namelen, name, 1803 assert->node_idx); 1804 __dlm_put_mle(mle); 1805 spin_unlock(&dlm->master_lock); 1806 spin_unlock(&dlm->spinlock); 1807 goto done; 1808 } 1809 } 1810 } 1811 spin_unlock(&dlm->master_lock); 1812 1813 /* ok everything checks out with the MLE 1814 * now check to see if there is a lockres */ 1815 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1816 if (res) { 1817 spin_lock(&res->spinlock); 1818 if (res->state & DLM_LOCK_RES_RECOVERING) { 1819 mlog(ML_ERROR, "%u asserting but %.*s is " 1820 "RECOVERING!\n", assert->node_idx, namelen, name); 1821 goto kill; 1822 } 1823 if (!mle) { 1824 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && 1825 res->owner != assert->node_idx) { 1826 mlog(ML_ERROR, "DIE! Mastery assert from %u, " 1827 "but current owner is %u! (%.*s)\n", 1828 assert->node_idx, res->owner, namelen, 1829 name); 1830 __dlm_print_one_lock_resource(res); 1831 BUG(); 1832 } 1833 } else if (mle->type != DLM_MLE_MIGRATION) { 1834 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1835 /* owner is just re-asserting */ 1836 if (res->owner == assert->node_idx) { 1837 mlog(0, "owner %u re-asserting on " 1838 "lock %.*s\n", assert->node_idx, 1839 namelen, name); 1840 goto ok; 1841 } 1842 mlog(ML_ERROR, "got assert_master from " 1843 "node %u, but %u is the owner! " 1844 "(%.*s)\n", assert->node_idx, 1845 res->owner, namelen, name); 1846 goto kill; 1847 } 1848 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1849 mlog(ML_ERROR, "got assert from %u, but lock " 1850 "with no owner should be " 1851 "in-progress! (%.*s)\n", 1852 assert->node_idx, 1853 namelen, name); 1854 goto kill; 1855 } 1856 } else /* mle->type == DLM_MLE_MIGRATION */ { 1857 /* should only be getting an assert from new master */ 1858 if (assert->node_idx != mle->new_master) { 1859 mlog(ML_ERROR, "got assert from %u, but " 1860 "new master is %u, and old master " 1861 "was %u (%.*s)\n", 1862 assert->node_idx, mle->new_master, 1863 mle->master, namelen, name); 1864 goto kill; 1865 } 1866 1867 } 1868 ok: 1869 spin_unlock(&res->spinlock); 1870 } 1871 1872 // mlog(0, "woo! got an assert_master from node %u!\n", 1873 // assert->node_idx); 1874 if (mle) { 1875 int extra_ref = 0; 1876 int nn = -1; 1877 int rr, err = 0; 1878 1879 spin_lock(&mle->spinlock); 1880 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1881 extra_ref = 1; 1882 else { 1883 /* MASTER mle: if any bits set in the response map 1884 * then the calling node needs to re-assert to clear 1885 * up nodes that this node contacted */ 1886 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1887 nn+1)) < O2NM_MAX_NODES) { 1888 if (nn != dlm->node_num && nn != assert->node_idx) 1889 master_request = 1; 1890 } 1891 } 1892 mle->master = assert->node_idx; 1893 atomic_set(&mle->woken, 1); 1894 wake_up(&mle->wq); 1895 spin_unlock(&mle->spinlock); 1896 1897 if (res) { 1898 int wake = 0; 1899 spin_lock(&res->spinlock); 1900 if (mle->type == DLM_MLE_MIGRATION) { 1901 mlog(0, "finishing off migration of lockres %.*s, " 1902 "from %u to %u\n", 1903 res->lockname.len, res->lockname.name, 1904 dlm->node_num, mle->new_master); 1905 res->state &= ~DLM_LOCK_RES_MIGRATING; 1906 wake = 1; 1907 dlm_change_lockres_owner(dlm, res, mle->new_master); 1908 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1909 } else { 1910 dlm_change_lockres_owner(dlm, res, mle->master); 1911 } 1912 spin_unlock(&res->spinlock); 1913 have_lockres_ref = 1; 1914 if (wake) 1915 wake_up(&res->wq); 1916 } 1917 1918 /* master is known, detach if not already detached. 1919 * ensures that only one assert_master call will happen 1920 * on this mle. */ 1921 spin_lock(&dlm->master_lock); 1922 1923 rr = atomic_read(&mle->mle_refs.refcount); 1924 if (mle->inuse > 0) { 1925 if (extra_ref && rr < 3) 1926 err = 1; 1927 else if (!extra_ref && rr < 2) 1928 err = 1; 1929 } else { 1930 if (extra_ref && rr < 2) 1931 err = 1; 1932 else if (!extra_ref && rr < 1) 1933 err = 1; 1934 } 1935 if (err) { 1936 mlog(ML_ERROR, "%s:%.*s: got assert master from %u " 1937 "that will mess up this node, refs=%d, extra=%d, " 1938 "inuse=%d\n", dlm->name, namelen, name, 1939 assert->node_idx, rr, extra_ref, mle->inuse); 1940 dlm_print_one_mle(mle); 1941 } 1942 __dlm_unlink_mle(dlm, mle); 1943 __dlm_mle_detach_hb_events(dlm, mle); 1944 __dlm_put_mle(mle); 1945 if (extra_ref) { 1946 /* the assert master message now balances the extra 1947 * ref given by the master / migration request message. 1948 * if this is the last put, it will be removed 1949 * from the list. */ 1950 __dlm_put_mle(mle); 1951 } 1952 spin_unlock(&dlm->master_lock); 1953 } else if (res) { 1954 if (res->owner != assert->node_idx) { 1955 mlog(0, "assert_master from %u, but current " 1956 "owner is %u (%.*s), no mle\n", assert->node_idx, 1957 res->owner, namelen, name); 1958 } 1959 } 1960 spin_unlock(&dlm->spinlock); 1961 1962 done: 1963 ret = 0; 1964 if (res) { 1965 spin_lock(&res->spinlock); 1966 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1967 spin_unlock(&res->spinlock); 1968 *ret_data = (void *)res; 1969 } 1970 dlm_put(dlm); 1971 if (master_request) { 1972 mlog(0, "need to tell master to reassert\n"); 1973 /* positive. negative would shoot down the node. */ 1974 ret |= DLM_ASSERT_RESPONSE_REASSERT; 1975 if (!have_lockres_ref) { 1976 mlog(ML_ERROR, "strange, got assert from %u, MASTER " 1977 "mle present here for %s:%.*s, but no lockres!\n", 1978 assert->node_idx, dlm->name, namelen, name); 1979 } 1980 } 1981 if (have_lockres_ref) { 1982 /* let the master know we have a reference to the lockres */ 1983 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; 1984 mlog(0, "%s:%.*s: got assert from %u, need a ref\n", 1985 dlm->name, namelen, name, assert->node_idx); 1986 } 1987 return ret; 1988 1989 kill: 1990 /* kill the caller! */ 1991 mlog(ML_ERROR, "Bad message received from another node. Dumping state " 1992 "and killing the other node now! This node is OK and can continue.\n"); 1993 __dlm_print_one_lock_resource(res); 1994 spin_unlock(&res->spinlock); 1995 spin_unlock(&dlm->spinlock); 1996 *ret_data = (void *)res; 1997 dlm_put(dlm); 1998 return -EINVAL; 1999 } 2000 2001 void dlm_assert_master_post_handler(int status, void *data, void *ret_data) 2002 { 2003 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; 2004 2005 if (ret_data) { 2006 spin_lock(&res->spinlock); 2007 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 2008 spin_unlock(&res->spinlock); 2009 wake_up(&res->wq); 2010 dlm_lockres_put(res); 2011 } 2012 return; 2013 } 2014 2015 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 2016 struct dlm_lock_resource *res, 2017 int ignore_higher, u8 request_from, u32 flags) 2018 { 2019 struct dlm_work_item *item; 2020 item = kzalloc(sizeof(*item), GFP_ATOMIC); 2021 if (!item) 2022 return -ENOMEM; 2023 2024 2025 /* queue up work for dlm_assert_master_worker */ 2026 dlm_grab(dlm); /* get an extra ref for the work item */ 2027 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2028 item->u.am.lockres = res; /* already have a ref */ 2029 /* can optionally ignore node numbers higher than this node */ 2030 item->u.am.ignore_higher = ignore_higher; 2031 item->u.am.request_from = request_from; 2032 item->u.am.flags = flags; 2033 2034 if (ignore_higher) 2035 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2036 res->lockname.name); 2037 2038 spin_lock(&dlm->work_lock); 2039 list_add_tail(&item->list, &dlm->work_list); 2040 spin_unlock(&dlm->work_lock); 2041 2042 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2043 return 0; 2044 } 2045 2046 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) 2047 { 2048 struct dlm_ctxt *dlm = data; 2049 int ret = 0; 2050 struct dlm_lock_resource *res; 2051 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2052 int ignore_higher; 2053 int bit; 2054 u8 request_from; 2055 u32 flags; 2056 2057 dlm = item->dlm; 2058 res = item->u.am.lockres; 2059 ignore_higher = item->u.am.ignore_higher; 2060 request_from = item->u.am.request_from; 2061 flags = item->u.am.flags; 2062 2063 spin_lock(&dlm->spinlock); 2064 memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); 2065 spin_unlock(&dlm->spinlock); 2066 2067 clear_bit(dlm->node_num, nodemap); 2068 if (ignore_higher) { 2069 /* if is this just to clear up mles for nodes below 2070 * this node, do not send the message to the original 2071 * caller or any node number higher than this */ 2072 clear_bit(request_from, nodemap); 2073 bit = dlm->node_num; 2074 while (1) { 2075 bit = find_next_bit(nodemap, O2NM_MAX_NODES, 2076 bit+1); 2077 if (bit >= O2NM_MAX_NODES) 2078 break; 2079 clear_bit(bit, nodemap); 2080 } 2081 } 2082 2083 /* 2084 * If we're migrating this lock to someone else, we are no 2085 * longer allowed to assert out own mastery. OTOH, we need to 2086 * prevent migration from starting while we're still asserting 2087 * our dominance. The reserved ast delays migration. 2088 */ 2089 spin_lock(&res->spinlock); 2090 if (res->state & DLM_LOCK_RES_MIGRATING) { 2091 mlog(0, "Someone asked us to assert mastery, but we're " 2092 "in the middle of migration. Skipping assert, " 2093 "the new master will handle that.\n"); 2094 spin_unlock(&res->spinlock); 2095 goto put; 2096 } else 2097 __dlm_lockres_reserve_ast(res); 2098 spin_unlock(&res->spinlock); 2099 2100 /* this call now finishes out the nodemap 2101 * even if one or more nodes die */ 2102 mlog(0, "worker about to master %.*s here, this=%u\n", 2103 res->lockname.len, res->lockname.name, dlm->node_num); 2104 ret = dlm_do_assert_master(dlm, res, nodemap, flags); 2105 if (ret < 0) { 2106 /* no need to restart, we are done */ 2107 if (!dlm_is_host_down(ret)) 2108 mlog_errno(ret); 2109 } 2110 2111 /* Ok, we've asserted ourselves. Let's let migration start. */ 2112 dlm_lockres_release_ast(dlm, res); 2113 2114 put: 2115 dlm_lockres_put(res); 2116 2117 mlog(0, "finished with dlm_assert_master_worker\n"); 2118 } 2119 2120 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. 2121 * We cannot wait for node recovery to complete to begin mastering this 2122 * lockres because this lockres is used to kick off recovery! ;-) 2123 * So, do a pre-check on all living nodes to see if any of those nodes 2124 * think that $RECOVERY is currently mastered by a dead node. If so, 2125 * we wait a short time to allow that node to get notified by its own 2126 * heartbeat stack, then check again. All $RECOVERY lock resources 2127 * mastered by dead nodes are purged when the hearbeat callback is 2128 * fired, so we can know for sure that it is safe to continue once 2129 * the node returns a live node or no node. */ 2130 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2131 struct dlm_lock_resource *res) 2132 { 2133 struct dlm_node_iter iter; 2134 int nodenum; 2135 int ret = 0; 2136 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; 2137 2138 spin_lock(&dlm->spinlock); 2139 dlm_node_iter_init(dlm->domain_map, &iter); 2140 spin_unlock(&dlm->spinlock); 2141 2142 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2143 /* do not send to self */ 2144 if (nodenum == dlm->node_num) 2145 continue; 2146 ret = dlm_do_master_requery(dlm, res, nodenum, &master); 2147 if (ret < 0) { 2148 mlog_errno(ret); 2149 if (!dlm_is_host_down(ret)) 2150 BUG(); 2151 /* host is down, so answer for that node would be 2152 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 2153 ret = 0; 2154 } 2155 2156 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { 2157 /* check to see if this master is in the recovery map */ 2158 spin_lock(&dlm->spinlock); 2159 if (test_bit(master, dlm->recovery_map)) { 2160 mlog(ML_NOTICE, "%s: node %u has not seen " 2161 "node %u go down yet, and thinks the " 2162 "dead node is mastering the recovery " 2163 "lock. must wait.\n", dlm->name, 2164 nodenum, master); 2165 ret = -EAGAIN; 2166 } 2167 spin_unlock(&dlm->spinlock); 2168 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2169 master); 2170 break; 2171 } 2172 } 2173 return ret; 2174 } 2175 2176 /* 2177 * DLM_DEREF_LOCKRES_MSG 2178 */ 2179 2180 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2181 { 2182 struct dlm_deref_lockres deref; 2183 int ret = 0, r; 2184 const char *lockname; 2185 unsigned int namelen; 2186 2187 lockname = res->lockname.name; 2188 namelen = res->lockname.len; 2189 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2190 2191 memset(&deref, 0, sizeof(deref)); 2192 deref.node_idx = dlm->node_num; 2193 deref.namelen = namelen; 2194 memcpy(deref.name, lockname, namelen); 2195 2196 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2197 &deref, sizeof(deref), res->owner, &r); 2198 if (ret < 0) 2199 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", 2200 dlm->name, namelen, lockname, ret, res->owner); 2201 else if (r < 0) { 2202 /* BAD. other node says I did not have a ref. */ 2203 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", 2204 dlm->name, namelen, lockname, res->owner, r); 2205 dlm_print_one_lock_resource(res); 2206 BUG(); 2207 } 2208 return ret; 2209 } 2210 2211 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 2212 void **ret_data) 2213 { 2214 struct dlm_ctxt *dlm = data; 2215 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; 2216 struct dlm_lock_resource *res = NULL; 2217 char *name; 2218 unsigned int namelen; 2219 int ret = -EINVAL; 2220 u8 node; 2221 unsigned int hash; 2222 struct dlm_work_item *item; 2223 int cleared = 0; 2224 int dispatch = 0; 2225 2226 if (!dlm_grab(dlm)) 2227 return 0; 2228 2229 name = deref->name; 2230 namelen = deref->namelen; 2231 node = deref->node_idx; 2232 2233 if (namelen > DLM_LOCKID_NAME_MAX) { 2234 mlog(ML_ERROR, "Invalid name length!"); 2235 goto done; 2236 } 2237 if (deref->node_idx >= O2NM_MAX_NODES) { 2238 mlog(ML_ERROR, "Invalid node number: %u\n", node); 2239 goto done; 2240 } 2241 2242 hash = dlm_lockid_hash(name, namelen); 2243 2244 spin_lock(&dlm->spinlock); 2245 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); 2246 if (!res) { 2247 spin_unlock(&dlm->spinlock); 2248 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", 2249 dlm->name, namelen, name); 2250 goto done; 2251 } 2252 spin_unlock(&dlm->spinlock); 2253 2254 spin_lock(&res->spinlock); 2255 if (res->state & DLM_LOCK_RES_SETREF_INPROG) 2256 dispatch = 1; 2257 else { 2258 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2259 if (test_bit(node, res->refmap)) { 2260 dlm_lockres_clear_refmap_bit(dlm, res, node); 2261 cleared = 1; 2262 } 2263 } 2264 spin_unlock(&res->spinlock); 2265 2266 if (!dispatch) { 2267 if (cleared) 2268 dlm_lockres_calc_usage(dlm, res); 2269 else { 2270 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2271 "but it is already dropped!\n", dlm->name, 2272 res->lockname.len, res->lockname.name, node); 2273 dlm_print_one_lock_resource(res); 2274 } 2275 ret = 0; 2276 goto done; 2277 } 2278 2279 item = kzalloc(sizeof(*item), GFP_NOFS); 2280 if (!item) { 2281 ret = -ENOMEM; 2282 mlog_errno(ret); 2283 goto done; 2284 } 2285 2286 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); 2287 item->u.dl.deref_res = res; 2288 item->u.dl.deref_node = node; 2289 2290 spin_lock(&dlm->work_lock); 2291 list_add_tail(&item->list, &dlm->work_list); 2292 spin_unlock(&dlm->work_lock); 2293 2294 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2295 return 0; 2296 2297 done: 2298 if (res) 2299 dlm_lockres_put(res); 2300 dlm_put(dlm); 2301 2302 return ret; 2303 } 2304 2305 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) 2306 { 2307 struct dlm_ctxt *dlm; 2308 struct dlm_lock_resource *res; 2309 u8 node; 2310 u8 cleared = 0; 2311 2312 dlm = item->dlm; 2313 res = item->u.dl.deref_res; 2314 node = item->u.dl.deref_node; 2315 2316 spin_lock(&res->spinlock); 2317 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2318 if (test_bit(node, res->refmap)) { 2319 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2320 dlm_lockres_clear_refmap_bit(dlm, res, node); 2321 cleared = 1; 2322 } 2323 spin_unlock(&res->spinlock); 2324 2325 if (cleared) { 2326 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", 2327 dlm->name, res->lockname.len, res->lockname.name, node); 2328 dlm_lockres_calc_usage(dlm, res); 2329 } else { 2330 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2331 "but it is already dropped!\n", dlm->name, 2332 res->lockname.len, res->lockname.name, node); 2333 dlm_print_one_lock_resource(res); 2334 } 2335 2336 dlm_lockres_put(res); 2337 } 2338 2339 /* 2340 * A migrateable resource is one that is : 2341 * 1. locally mastered, and, 2342 * 2. zero local locks, and, 2343 * 3. one or more non-local locks, or, one or more references 2344 * Returns 1 if yes, 0 if not. 2345 */ 2346 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, 2347 struct dlm_lock_resource *res) 2348 { 2349 enum dlm_lockres_list idx; 2350 int nonlocal = 0, node_ref; 2351 struct list_head *queue; 2352 struct dlm_lock *lock; 2353 u64 cookie; 2354 2355 assert_spin_locked(&res->spinlock); 2356 2357 if (res->owner != dlm->node_num) 2358 return 0; 2359 2360 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { 2361 queue = dlm_list_idx_to_ptr(res, idx); 2362 list_for_each_entry(lock, queue, list) { 2363 if (lock->ml.node != dlm->node_num) { 2364 nonlocal++; 2365 continue; 2366 } 2367 cookie = be64_to_cpu(lock->ml.cookie); 2368 mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on " 2369 "%s list\n", dlm->name, res->lockname.len, 2370 res->lockname.name, 2371 dlm_get_lock_cookie_node(cookie), 2372 dlm_get_lock_cookie_seq(cookie), 2373 dlm_list_in_text(idx)); 2374 return 0; 2375 } 2376 } 2377 2378 if (!nonlocal) { 2379 node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 2380 if (node_ref >= O2NM_MAX_NODES) 2381 return 0; 2382 } 2383 2384 mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len, 2385 res->lockname.name); 2386 2387 return 1; 2388 } 2389 2390 /* 2391 * DLM_MIGRATE_LOCKRES 2392 */ 2393 2394 2395 static int dlm_migrate_lockres(struct dlm_ctxt *dlm, 2396 struct dlm_lock_resource *res, u8 target) 2397 { 2398 struct dlm_master_list_entry *mle = NULL; 2399 struct dlm_master_list_entry *oldmle = NULL; 2400 struct dlm_migratable_lockres *mres = NULL; 2401 int ret = 0; 2402 const char *name; 2403 unsigned int namelen; 2404 int mle_added = 0; 2405 int wake = 0; 2406 2407 if (!dlm_grab(dlm)) 2408 return -EINVAL; 2409 2410 BUG_ON(target == O2NM_MAX_NODES); 2411 2412 name = res->lockname.name; 2413 namelen = res->lockname.len; 2414 2415 mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, 2416 target); 2417 2418 /* preallocate up front. if this fails, abort */ 2419 ret = -ENOMEM; 2420 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); 2421 if (!mres) { 2422 mlog_errno(ret); 2423 goto leave; 2424 } 2425 2426 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 2427 if (!mle) { 2428 mlog_errno(ret); 2429 goto leave; 2430 } 2431 ret = 0; 2432 2433 /* 2434 * clear any existing master requests and 2435 * add the migration mle to the list 2436 */ 2437 spin_lock(&dlm->spinlock); 2438 spin_lock(&dlm->master_lock); 2439 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, 2440 namelen, target, dlm->node_num); 2441 spin_unlock(&dlm->master_lock); 2442 spin_unlock(&dlm->spinlock); 2443 2444 if (ret == -EEXIST) { 2445 mlog(0, "another process is already migrating it\n"); 2446 goto fail; 2447 } 2448 mle_added = 1; 2449 2450 /* 2451 * set the MIGRATING flag and flush asts 2452 * if we fail after this we need to re-dirty the lockres 2453 */ 2454 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { 2455 mlog(ML_ERROR, "tried to migrate %.*s to %u, but " 2456 "the target went down.\n", res->lockname.len, 2457 res->lockname.name, target); 2458 spin_lock(&res->spinlock); 2459 res->state &= ~DLM_LOCK_RES_MIGRATING; 2460 wake = 1; 2461 spin_unlock(&res->spinlock); 2462 ret = -EINVAL; 2463 } 2464 2465 fail: 2466 if (oldmle) { 2467 /* master is known, detach if not already detached */ 2468 dlm_mle_detach_hb_events(dlm, oldmle); 2469 dlm_put_mle(oldmle); 2470 } 2471 2472 if (ret < 0) { 2473 if (mle_added) { 2474 dlm_mle_detach_hb_events(dlm, mle); 2475 dlm_put_mle(mle); 2476 } else if (mle) { 2477 kmem_cache_free(dlm_mle_cache, mle); 2478 mle = NULL; 2479 } 2480 goto leave; 2481 } 2482 2483 /* 2484 * at this point, we have a migration target, an mle 2485 * in the master list, and the MIGRATING flag set on 2486 * the lockres 2487 */ 2488 2489 /* now that remote nodes are spinning on the MIGRATING flag, 2490 * ensure that all assert_master work is flushed. */ 2491 flush_workqueue(dlm->dlm_worker); 2492 2493 /* get an extra reference on the mle. 2494 * otherwise the assert_master from the new 2495 * master will destroy this. 2496 * also, make sure that all callers of dlm_get_mle 2497 * take both dlm->spinlock and dlm->master_lock */ 2498 spin_lock(&dlm->spinlock); 2499 spin_lock(&dlm->master_lock); 2500 dlm_get_mle_inuse(mle); 2501 spin_unlock(&dlm->master_lock); 2502 spin_unlock(&dlm->spinlock); 2503 2504 /* notify new node and send all lock state */ 2505 /* call send_one_lockres with migration flag. 2506 * this serves as notice to the target node that a 2507 * migration is starting. */ 2508 ret = dlm_send_one_lockres(dlm, res, mres, target, 2509 DLM_MRES_MIGRATION); 2510 2511 if (ret < 0) { 2512 mlog(0, "migration to node %u failed with %d\n", 2513 target, ret); 2514 /* migration failed, detach and clean up mle */ 2515 dlm_mle_detach_hb_events(dlm, mle); 2516 dlm_put_mle(mle); 2517 dlm_put_mle_inuse(mle); 2518 spin_lock(&res->spinlock); 2519 res->state &= ~DLM_LOCK_RES_MIGRATING; 2520 wake = 1; 2521 spin_unlock(&res->spinlock); 2522 if (dlm_is_host_down(ret)) 2523 dlm_wait_for_node_death(dlm, target, 2524 DLM_NODE_DEATH_WAIT_MAX); 2525 goto leave; 2526 } 2527 2528 /* at this point, the target sends a message to all nodes, 2529 * (using dlm_do_migrate_request). this node is skipped since 2530 * we had to put an mle in the list to begin the process. this 2531 * node now waits for target to do an assert master. this node 2532 * will be the last one notified, ensuring that the migration 2533 * is complete everywhere. if the target dies while this is 2534 * going on, some nodes could potentially see the target as the 2535 * master, so it is important that my recovery finds the migration 2536 * mle and sets the master to UNKNOWN. */ 2537 2538 2539 /* wait for new node to assert master */ 2540 while (1) { 2541 ret = wait_event_interruptible_timeout(mle->wq, 2542 (atomic_read(&mle->woken) == 1), 2543 msecs_to_jiffies(5000)); 2544 2545 if (ret >= 0) { 2546 if (atomic_read(&mle->woken) == 1 || 2547 res->owner == target) 2548 break; 2549 2550 mlog(0, "%s:%.*s: timed out during migration\n", 2551 dlm->name, res->lockname.len, res->lockname.name); 2552 /* avoid hang during shutdown when migrating lockres 2553 * to a node which also goes down */ 2554 if (dlm_is_node_dead(dlm, target)) { 2555 mlog(0, "%s:%.*s: expected migration " 2556 "target %u is no longer up, restarting\n", 2557 dlm->name, res->lockname.len, 2558 res->lockname.name, target); 2559 ret = -EINVAL; 2560 /* migration failed, detach and clean up mle */ 2561 dlm_mle_detach_hb_events(dlm, mle); 2562 dlm_put_mle(mle); 2563 dlm_put_mle_inuse(mle); 2564 spin_lock(&res->spinlock); 2565 res->state &= ~DLM_LOCK_RES_MIGRATING; 2566 wake = 1; 2567 spin_unlock(&res->spinlock); 2568 goto leave; 2569 } 2570 } else 2571 mlog(0, "%s:%.*s: caught signal during migration\n", 2572 dlm->name, res->lockname.len, res->lockname.name); 2573 } 2574 2575 /* all done, set the owner, clear the flag */ 2576 spin_lock(&res->spinlock); 2577 dlm_set_lockres_owner(dlm, res, target); 2578 res->state &= ~DLM_LOCK_RES_MIGRATING; 2579 dlm_remove_nonlocal_locks(dlm, res); 2580 spin_unlock(&res->spinlock); 2581 wake_up(&res->wq); 2582 2583 /* master is known, detach if not already detached */ 2584 dlm_mle_detach_hb_events(dlm, mle); 2585 dlm_put_mle_inuse(mle); 2586 ret = 0; 2587 2588 dlm_lockres_calc_usage(dlm, res); 2589 2590 leave: 2591 /* re-dirty the lockres if we failed */ 2592 if (ret < 0) 2593 dlm_kick_thread(dlm, res); 2594 2595 /* wake up waiters if the MIGRATING flag got set 2596 * but migration failed */ 2597 if (wake) 2598 wake_up(&res->wq); 2599 2600 if (mres) 2601 free_page((unsigned long)mres); 2602 2603 dlm_put(dlm); 2604 2605 mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, 2606 name, target, ret); 2607 return ret; 2608 } 2609 2610 #define DLM_MIGRATION_RETRY_MS 100 2611 2612 /* 2613 * Should be called only after beginning the domain leave process. 2614 * There should not be any remaining locks on nonlocal lock resources, 2615 * and there should be no local locks left on locally mastered resources. 2616 * 2617 * Called with the dlm spinlock held, may drop it to do migration, but 2618 * will re-acquire before exit. 2619 * 2620 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped 2621 */ 2622 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2623 { 2624 int ret; 2625 int lock_dropped = 0; 2626 u8 target = O2NM_MAX_NODES; 2627 2628 assert_spin_locked(&dlm->spinlock); 2629 2630 spin_lock(&res->spinlock); 2631 if (dlm_is_lockres_migrateable(dlm, res)) 2632 target = dlm_pick_migration_target(dlm, res); 2633 spin_unlock(&res->spinlock); 2634 2635 if (target == O2NM_MAX_NODES) 2636 goto leave; 2637 2638 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ 2639 spin_unlock(&dlm->spinlock); 2640 lock_dropped = 1; 2641 ret = dlm_migrate_lockres(dlm, res, target); 2642 if (ret) 2643 mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", 2644 dlm->name, res->lockname.len, res->lockname.name, 2645 target, ret); 2646 spin_lock(&dlm->spinlock); 2647 leave: 2648 return lock_dropped; 2649 } 2650 2651 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2652 { 2653 int ret; 2654 spin_lock(&dlm->ast_lock); 2655 spin_lock(&lock->spinlock); 2656 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); 2657 spin_unlock(&lock->spinlock); 2658 spin_unlock(&dlm->ast_lock); 2659 return ret; 2660 } 2661 2662 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, 2663 struct dlm_lock_resource *res, 2664 u8 mig_target) 2665 { 2666 int can_proceed; 2667 spin_lock(&res->spinlock); 2668 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2669 spin_unlock(&res->spinlock); 2670 2671 /* target has died, so make the caller break out of the 2672 * wait_event, but caller must recheck the domain_map */ 2673 spin_lock(&dlm->spinlock); 2674 if (!test_bit(mig_target, dlm->domain_map)) 2675 can_proceed = 1; 2676 spin_unlock(&dlm->spinlock); 2677 return can_proceed; 2678 } 2679 2680 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, 2681 struct dlm_lock_resource *res) 2682 { 2683 int ret; 2684 spin_lock(&res->spinlock); 2685 ret = !!(res->state & DLM_LOCK_RES_DIRTY); 2686 spin_unlock(&res->spinlock); 2687 return ret; 2688 } 2689 2690 2691 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 2692 struct dlm_lock_resource *res, 2693 u8 target) 2694 { 2695 int ret = 0; 2696 2697 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", 2698 res->lockname.len, res->lockname.name, dlm->node_num, 2699 target); 2700 /* need to set MIGRATING flag on lockres. this is done by 2701 * ensuring that all asts have been flushed for this lockres. */ 2702 spin_lock(&res->spinlock); 2703 BUG_ON(res->migration_pending); 2704 res->migration_pending = 1; 2705 /* strategy is to reserve an extra ast then release 2706 * it below, letting the release do all of the work */ 2707 __dlm_lockres_reserve_ast(res); 2708 spin_unlock(&res->spinlock); 2709 2710 /* now flush all the pending asts */ 2711 dlm_kick_thread(dlm, res); 2712 /* before waiting on DIRTY, block processes which may 2713 * try to dirty the lockres before MIGRATING is set */ 2714 spin_lock(&res->spinlock); 2715 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); 2716 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; 2717 spin_unlock(&res->spinlock); 2718 /* now wait on any pending asts and the DIRTY state */ 2719 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); 2720 dlm_lockres_release_ast(dlm, res); 2721 2722 mlog(0, "about to wait on migration_wq, dirty=%s\n", 2723 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); 2724 /* if the extra ref we just put was the final one, this 2725 * will pass thru immediately. otherwise, we need to wait 2726 * for the last ast to finish. */ 2727 again: 2728 ret = wait_event_interruptible_timeout(dlm->migration_wq, 2729 dlm_migration_can_proceed(dlm, res, target), 2730 msecs_to_jiffies(1000)); 2731 if (ret < 0) { 2732 mlog(0, "woken again: migrating? %s, dead? %s\n", 2733 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2734 test_bit(target, dlm->domain_map) ? "no":"yes"); 2735 } else { 2736 mlog(0, "all is well: migrating? %s, dead? %s\n", 2737 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2738 test_bit(target, dlm->domain_map) ? "no":"yes"); 2739 } 2740 if (!dlm_migration_can_proceed(dlm, res, target)) { 2741 mlog(0, "trying again...\n"); 2742 goto again; 2743 } 2744 2745 ret = 0; 2746 /* did the target go down or die? */ 2747 spin_lock(&dlm->spinlock); 2748 if (!test_bit(target, dlm->domain_map)) { 2749 mlog(ML_ERROR, "aha. migration target %u just went down\n", 2750 target); 2751 ret = -EHOSTDOWN; 2752 } 2753 spin_unlock(&dlm->spinlock); 2754 2755 /* 2756 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for 2757 * another try; otherwise, we are sure the MIGRATING state is there, 2758 * drop the unneded state which blocked threads trying to DIRTY 2759 */ 2760 spin_lock(&res->spinlock); 2761 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); 2762 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2763 if (!ret) 2764 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2765 spin_unlock(&res->spinlock); 2766 2767 /* 2768 * at this point: 2769 * 2770 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down 2771 * o there are no pending asts on this lockres 2772 * o all processes trying to reserve an ast on this 2773 * lockres must wait for the MIGRATING flag to clear 2774 */ 2775 return ret; 2776 } 2777 2778 /* last step in the migration process. 2779 * original master calls this to free all of the dlm_lock 2780 * structures that used to be for other nodes. */ 2781 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 2782 struct dlm_lock_resource *res) 2783 { 2784 struct list_head *queue = &res->granted; 2785 int i, bit; 2786 struct dlm_lock *lock, *next; 2787 2788 assert_spin_locked(&res->spinlock); 2789 2790 BUG_ON(res->owner == dlm->node_num); 2791 2792 for (i=0; i<3; i++) { 2793 list_for_each_entry_safe(lock, next, queue, list) { 2794 if (lock->ml.node != dlm->node_num) { 2795 mlog(0, "putting lock for node %u\n", 2796 lock->ml.node); 2797 /* be extra careful */ 2798 BUG_ON(!list_empty(&lock->ast_list)); 2799 BUG_ON(!list_empty(&lock->bast_list)); 2800 BUG_ON(lock->ast_pending); 2801 BUG_ON(lock->bast_pending); 2802 dlm_lockres_clear_refmap_bit(dlm, res, 2803 lock->ml.node); 2804 list_del_init(&lock->list); 2805 dlm_lock_put(lock); 2806 /* In a normal unlock, we would have added a 2807 * DLM_UNLOCK_FREE_LOCK action. Force it. */ 2808 dlm_lock_put(lock); 2809 } 2810 } 2811 queue++; 2812 } 2813 bit = 0; 2814 while (1) { 2815 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); 2816 if (bit >= O2NM_MAX_NODES) 2817 break; 2818 /* do not clear the local node reference, if there is a 2819 * process holding this, let it drop the ref itself */ 2820 if (bit != dlm->node_num) { 2821 mlog(0, "%s:%.*s: node %u had a ref to this " 2822 "migrating lockres, clearing\n", dlm->name, 2823 res->lockname.len, res->lockname.name, bit); 2824 dlm_lockres_clear_refmap_bit(dlm, res, bit); 2825 } 2826 bit++; 2827 } 2828 } 2829 2830 /* 2831 * Pick a node to migrate the lock resource to. This function selects a 2832 * potential target based first on the locks and then on refmap. It skips 2833 * nodes that are in the process of exiting the domain. 2834 */ 2835 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 2836 struct dlm_lock_resource *res) 2837 { 2838 enum dlm_lockres_list idx; 2839 struct list_head *queue = &res->granted; 2840 struct dlm_lock *lock; 2841 int noderef; 2842 u8 nodenum = O2NM_MAX_NODES; 2843 2844 assert_spin_locked(&dlm->spinlock); 2845 assert_spin_locked(&res->spinlock); 2846 2847 /* Go through all the locks */ 2848 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { 2849 queue = dlm_list_idx_to_ptr(res, idx); 2850 list_for_each_entry(lock, queue, list) { 2851 if (lock->ml.node == dlm->node_num) 2852 continue; 2853 if (test_bit(lock->ml.node, dlm->exit_domain_map)) 2854 continue; 2855 nodenum = lock->ml.node; 2856 goto bail; 2857 } 2858 } 2859 2860 /* Go thru the refmap */ 2861 noderef = -1; 2862 while (1) { 2863 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, 2864 noderef + 1); 2865 if (noderef >= O2NM_MAX_NODES) 2866 break; 2867 if (noderef == dlm->node_num) 2868 continue; 2869 if (test_bit(noderef, dlm->exit_domain_map)) 2870 continue; 2871 nodenum = noderef; 2872 goto bail; 2873 } 2874 2875 bail: 2876 return nodenum; 2877 } 2878 2879 /* this is called by the new master once all lockres 2880 * data has been received */ 2881 static int dlm_do_migrate_request(struct dlm_ctxt *dlm, 2882 struct dlm_lock_resource *res, 2883 u8 master, u8 new_master, 2884 struct dlm_node_iter *iter) 2885 { 2886 struct dlm_migrate_request migrate; 2887 int ret, skip, status = 0; 2888 int nodenum; 2889 2890 memset(&migrate, 0, sizeof(migrate)); 2891 migrate.namelen = res->lockname.len; 2892 memcpy(migrate.name, res->lockname.name, migrate.namelen); 2893 migrate.new_master = new_master; 2894 migrate.master = master; 2895 2896 ret = 0; 2897 2898 /* send message to all nodes, except the master and myself */ 2899 while ((nodenum = dlm_node_iter_next(iter)) >= 0) { 2900 if (nodenum == master || 2901 nodenum == new_master) 2902 continue; 2903 2904 /* We could race exit domain. If exited, skip. */ 2905 spin_lock(&dlm->spinlock); 2906 skip = (!test_bit(nodenum, dlm->domain_map)); 2907 spin_unlock(&dlm->spinlock); 2908 if (skip) { 2909 clear_bit(nodenum, iter->node_map); 2910 continue; 2911 } 2912 2913 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, 2914 &migrate, sizeof(migrate), nodenum, 2915 &status); 2916 if (ret < 0) { 2917 mlog(ML_ERROR, "%s: res %.*s, Error %d send " 2918 "MIGRATE_REQUEST to node %u\n", dlm->name, 2919 migrate.namelen, migrate.name, ret, nodenum); 2920 if (!dlm_is_host_down(ret)) { 2921 mlog(ML_ERROR, "unhandled error=%d!\n", ret); 2922 BUG(); 2923 } 2924 clear_bit(nodenum, iter->node_map); 2925 ret = 0; 2926 } else if (status < 0) { 2927 mlog(0, "migrate request (node %u) returned %d!\n", 2928 nodenum, status); 2929 ret = status; 2930 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { 2931 /* during the migration request we short-circuited 2932 * the mastery of the lockres. make sure we have 2933 * a mastery ref for nodenum */ 2934 mlog(0, "%s:%.*s: need ref for node %u\n", 2935 dlm->name, res->lockname.len, res->lockname.name, 2936 nodenum); 2937 spin_lock(&res->spinlock); 2938 dlm_lockres_set_refmap_bit(dlm, res, nodenum); 2939 spin_unlock(&res->spinlock); 2940 } 2941 } 2942 2943 if (ret < 0) 2944 mlog_errno(ret); 2945 2946 mlog(0, "returning ret=%d\n", ret); 2947 return ret; 2948 } 2949 2950 2951 /* if there is an existing mle for this lockres, we now know who the master is. 2952 * (the one who sent us *this* message) we can clear it up right away. 2953 * since the process that put the mle on the list still has a reference to it, 2954 * we can unhash it now, set the master and wake the process. as a result, 2955 * we will have no mle in the list to start with. now we can add an mle for 2956 * the migration and this should be the only one found for those scanning the 2957 * list. */ 2958 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, 2959 void **ret_data) 2960 { 2961 struct dlm_ctxt *dlm = data; 2962 struct dlm_lock_resource *res = NULL; 2963 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; 2964 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; 2965 const char *name; 2966 unsigned int namelen, hash; 2967 int ret = 0; 2968 2969 if (!dlm_grab(dlm)) 2970 return -EINVAL; 2971 2972 name = migrate->name; 2973 namelen = migrate->namelen; 2974 hash = dlm_lockid_hash(name, namelen); 2975 2976 /* preallocate.. if this fails, abort */ 2977 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 2978 2979 if (!mle) { 2980 ret = -ENOMEM; 2981 goto leave; 2982 } 2983 2984 /* check for pre-existing lock */ 2985 spin_lock(&dlm->spinlock); 2986 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 2987 if (res) { 2988 spin_lock(&res->spinlock); 2989 if (res->state & DLM_LOCK_RES_RECOVERING) { 2990 /* if all is working ok, this can only mean that we got 2991 * a migrate request from a node that we now see as 2992 * dead. what can we do here? drop it to the floor? */ 2993 spin_unlock(&res->spinlock); 2994 mlog(ML_ERROR, "Got a migrate request, but the " 2995 "lockres is marked as recovering!"); 2996 kmem_cache_free(dlm_mle_cache, mle); 2997 ret = -EINVAL; /* need a better solution */ 2998 goto unlock; 2999 } 3000 res->state |= DLM_LOCK_RES_MIGRATING; 3001 spin_unlock(&res->spinlock); 3002 } 3003 3004 spin_lock(&dlm->master_lock); 3005 /* ignore status. only nonzero status would BUG. */ 3006 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 3007 name, namelen, 3008 migrate->new_master, 3009 migrate->master); 3010 3011 spin_unlock(&dlm->master_lock); 3012 unlock: 3013 spin_unlock(&dlm->spinlock); 3014 3015 if (oldmle) { 3016 /* master is known, detach if not already detached */ 3017 dlm_mle_detach_hb_events(dlm, oldmle); 3018 dlm_put_mle(oldmle); 3019 } 3020 3021 if (res) 3022 dlm_lockres_put(res); 3023 leave: 3024 dlm_put(dlm); 3025 return ret; 3026 } 3027 3028 /* must be holding dlm->spinlock and dlm->master_lock 3029 * when adding a migration mle, we can clear any other mles 3030 * in the master list because we know with certainty that 3031 * the master is "master". so we remove any old mle from 3032 * the list after setting it's master field, and then add 3033 * the new migration mle. this way we can hold with the rule 3034 * of having only one mle for a given lock name at all times. */ 3035 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 3036 struct dlm_lock_resource *res, 3037 struct dlm_master_list_entry *mle, 3038 struct dlm_master_list_entry **oldmle, 3039 const char *name, unsigned int namelen, 3040 u8 new_master, u8 master) 3041 { 3042 int found; 3043 int ret = 0; 3044 3045 *oldmle = NULL; 3046 3047 assert_spin_locked(&dlm->spinlock); 3048 assert_spin_locked(&dlm->master_lock); 3049 3050 /* caller is responsible for any ref taken here on oldmle */ 3051 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); 3052 if (found) { 3053 struct dlm_master_list_entry *tmp = *oldmle; 3054 spin_lock(&tmp->spinlock); 3055 if (tmp->type == DLM_MLE_MIGRATION) { 3056 if (master == dlm->node_num) { 3057 /* ah another process raced me to it */ 3058 mlog(0, "tried to migrate %.*s, but some " 3059 "process beat me to it\n", 3060 namelen, name); 3061 ret = -EEXIST; 3062 } else { 3063 /* bad. 2 NODES are trying to migrate! */ 3064 mlog(ML_ERROR, "migration error mle: " 3065 "master=%u new_master=%u // request: " 3066 "master=%u new_master=%u // " 3067 "lockres=%.*s\n", 3068 tmp->master, tmp->new_master, 3069 master, new_master, 3070 namelen, name); 3071 BUG(); 3072 } 3073 } else { 3074 /* this is essentially what assert_master does */ 3075 tmp->master = master; 3076 atomic_set(&tmp->woken, 1); 3077 wake_up(&tmp->wq); 3078 /* remove it so that only one mle will be found */ 3079 __dlm_unlink_mle(dlm, tmp); 3080 __dlm_mle_detach_hb_events(dlm, tmp); 3081 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3082 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3083 "telling master to get ref for cleared out mle " 3084 "during migration\n", dlm->name, namelen, name, 3085 master, new_master); 3086 } 3087 spin_unlock(&tmp->spinlock); 3088 } 3089 3090 /* now add a migration mle to the tail of the list */ 3091 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); 3092 mle->new_master = new_master; 3093 /* the new master will be sending an assert master for this. 3094 * at that point we will get the refmap reference */ 3095 mle->master = master; 3096 /* do this for consistency with other mle types */ 3097 set_bit(new_master, mle->maybe_map); 3098 __dlm_insert_mle(dlm, mle); 3099 3100 return ret; 3101 } 3102 3103 /* 3104 * Sets the owner of the lockres, associated to the mle, to UNKNOWN 3105 */ 3106 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm, 3107 struct dlm_master_list_entry *mle) 3108 { 3109 struct dlm_lock_resource *res; 3110 3111 /* Find the lockres associated to the mle and set its owner to UNK */ 3112 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, 3113 mle->mnamehash); 3114 if (res) { 3115 spin_unlock(&dlm->master_lock); 3116 3117 /* move lockres onto recovery list */ 3118 spin_lock(&res->spinlock); 3119 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 3120 dlm_move_lockres_to_recovery_list(dlm, res); 3121 spin_unlock(&res->spinlock); 3122 dlm_lockres_put(res); 3123 3124 /* about to get rid of mle, detach from heartbeat */ 3125 __dlm_mle_detach_hb_events(dlm, mle); 3126 3127 /* dump the mle */ 3128 spin_lock(&dlm->master_lock); 3129 __dlm_put_mle(mle); 3130 spin_unlock(&dlm->master_lock); 3131 } 3132 3133 return res; 3134 } 3135 3136 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm, 3137 struct dlm_master_list_entry *mle) 3138 { 3139 __dlm_mle_detach_hb_events(dlm, mle); 3140 3141 spin_lock(&mle->spinlock); 3142 __dlm_unlink_mle(dlm, mle); 3143 atomic_set(&mle->woken, 1); 3144 spin_unlock(&mle->spinlock); 3145 3146 wake_up(&mle->wq); 3147 } 3148 3149 static void dlm_clean_block_mle(struct dlm_ctxt *dlm, 3150 struct dlm_master_list_entry *mle, u8 dead_node) 3151 { 3152 int bit; 3153 3154 BUG_ON(mle->type != DLM_MLE_BLOCK); 3155 3156 spin_lock(&mle->spinlock); 3157 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 3158 if (bit != dead_node) { 3159 mlog(0, "mle found, but dead node %u would not have been " 3160 "master\n", dead_node); 3161 spin_unlock(&mle->spinlock); 3162 } else { 3163 /* Must drop the refcount by one since the assert_master will 3164 * never arrive. This may result in the mle being unlinked and 3165 * freed, but there may still be a process waiting in the 3166 * dlmlock path which is fine. */ 3167 mlog(0, "node %u was expected master\n", dead_node); 3168 atomic_set(&mle->woken, 1); 3169 spin_unlock(&mle->spinlock); 3170 wake_up(&mle->wq); 3171 3172 /* Do not need events any longer, so detach from heartbeat */ 3173 __dlm_mle_detach_hb_events(dlm, mle); 3174 __dlm_put_mle(mle); 3175 } 3176 } 3177 3178 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) 3179 { 3180 struct dlm_master_list_entry *mle; 3181 struct dlm_lock_resource *res; 3182 struct hlist_head *bucket; 3183 struct hlist_node *tmp; 3184 unsigned int i; 3185 3186 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); 3187 top: 3188 assert_spin_locked(&dlm->spinlock); 3189 3190 /* clean the master list */ 3191 spin_lock(&dlm->master_lock); 3192 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3193 bucket = dlm_master_hash(dlm, i); 3194 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { 3195 BUG_ON(mle->type != DLM_MLE_BLOCK && 3196 mle->type != DLM_MLE_MASTER && 3197 mle->type != DLM_MLE_MIGRATION); 3198 3199 /* MASTER mles are initiated locally. The waiting 3200 * process will notice the node map change shortly. 3201 * Let that happen as normal. */ 3202 if (mle->type == DLM_MLE_MASTER) 3203 continue; 3204 3205 /* BLOCK mles are initiated by other nodes. Need to 3206 * clean up if the dead node would have been the 3207 * master. */ 3208 if (mle->type == DLM_MLE_BLOCK) { 3209 dlm_clean_block_mle(dlm, mle, dead_node); 3210 continue; 3211 } 3212 3213 /* Everything else is a MIGRATION mle */ 3214 3215 /* The rule for MIGRATION mles is that the master 3216 * becomes UNKNOWN if *either* the original or the new 3217 * master dies. All UNKNOWN lockres' are sent to 3218 * whichever node becomes the recovery master. The new 3219 * master is responsible for determining if there is 3220 * still a master for this lockres, or if he needs to 3221 * take over mastery. Either way, this node should 3222 * expect another message to resolve this. */ 3223 3224 if (mle->master != dead_node && 3225 mle->new_master != dead_node) 3226 continue; 3227 3228 /* If we have reached this point, this mle needs to be 3229 * removed from the list and freed. */ 3230 dlm_clean_migration_mle(dlm, mle); 3231 3232 mlog(0, "%s: node %u died during migration from " 3233 "%u to %u!\n", dlm->name, dead_node, mle->master, 3234 mle->new_master); 3235 3236 /* If we find a lockres associated with the mle, we've 3237 * hit this rare case that messes up our lock ordering. 3238 * If so, we need to drop the master lock so that we can 3239 * take the lockres lock, meaning that we will have to 3240 * restart from the head of list. */ 3241 res = dlm_reset_mleres_owner(dlm, mle); 3242 if (res) 3243 /* restart */ 3244 goto top; 3245 3246 /* This may be the last reference */ 3247 __dlm_put_mle(mle); 3248 } 3249 } 3250 spin_unlock(&dlm->master_lock); 3251 } 3252 3253 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 3254 u8 old_master) 3255 { 3256 struct dlm_node_iter iter; 3257 int ret = 0; 3258 3259 spin_lock(&dlm->spinlock); 3260 dlm_node_iter_init(dlm->domain_map, &iter); 3261 clear_bit(old_master, iter.node_map); 3262 clear_bit(dlm->node_num, iter.node_map); 3263 spin_unlock(&dlm->spinlock); 3264 3265 /* ownership of the lockres is changing. account for the 3266 * mastery reference here since old_master will briefly have 3267 * a reference after the migration completes */ 3268 spin_lock(&res->spinlock); 3269 dlm_lockres_set_refmap_bit(dlm, res, old_master); 3270 spin_unlock(&res->spinlock); 3271 3272 mlog(0, "now time to do a migrate request to other nodes\n"); 3273 ret = dlm_do_migrate_request(dlm, res, old_master, 3274 dlm->node_num, &iter); 3275 if (ret < 0) { 3276 mlog_errno(ret); 3277 goto leave; 3278 } 3279 3280 mlog(0, "doing assert master of %.*s to all except the original node\n", 3281 res->lockname.len, res->lockname.name); 3282 /* this call now finishes out the nodemap 3283 * even if one or more nodes die */ 3284 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3285 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3286 if (ret < 0) { 3287 /* no longer need to retry. all living nodes contacted. */ 3288 mlog_errno(ret); 3289 ret = 0; 3290 } 3291 3292 memset(iter.node_map, 0, sizeof(iter.node_map)); 3293 set_bit(old_master, iter.node_map); 3294 mlog(0, "doing assert master of %.*s back to %u\n", 3295 res->lockname.len, res->lockname.name, old_master); 3296 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3297 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3298 if (ret < 0) { 3299 mlog(0, "assert master to original master failed " 3300 "with %d.\n", ret); 3301 /* the only nonzero status here would be because of 3302 * a dead original node. we're done. */ 3303 ret = 0; 3304 } 3305 3306 /* all done, set the owner, clear the flag */ 3307 spin_lock(&res->spinlock); 3308 dlm_set_lockres_owner(dlm, res, dlm->node_num); 3309 res->state &= ~DLM_LOCK_RES_MIGRATING; 3310 spin_unlock(&res->spinlock); 3311 /* re-dirty it on the new master */ 3312 dlm_kick_thread(dlm, res); 3313 wake_up(&res->wq); 3314 leave: 3315 return ret; 3316 } 3317 3318 /* 3319 * LOCKRES AST REFCOUNT 3320 * this is integral to migration 3321 */ 3322 3323 /* for future intent to call an ast, reserve one ahead of time. 3324 * this should be called only after waiting on the lockres 3325 * with dlm_wait_on_lockres, and while still holding the 3326 * spinlock after the call. */ 3327 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) 3328 { 3329 assert_spin_locked(&res->spinlock); 3330 if (res->state & DLM_LOCK_RES_MIGRATING) { 3331 __dlm_print_one_lock_resource(res); 3332 } 3333 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3334 3335 atomic_inc(&res->asts_reserved); 3336 } 3337 3338 /* 3339 * used to drop the reserved ast, either because it went unused, 3340 * or because the ast/bast was actually called. 3341 * 3342 * also, if there is a pending migration on this lockres, 3343 * and this was the last pending ast on the lockres, 3344 * atomically set the MIGRATING flag before we drop the lock. 3345 * this is how we ensure that migration can proceed with no 3346 * asts in progress. note that it is ok if the state of the 3347 * queues is such that a lock should be granted in the future 3348 * or that a bast should be fired, because the new master will 3349 * shuffle the lists on this lockres as soon as it is migrated. 3350 */ 3351 void dlm_lockres_release_ast(struct dlm_ctxt *dlm, 3352 struct dlm_lock_resource *res) 3353 { 3354 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) 3355 return; 3356 3357 if (!res->migration_pending) { 3358 spin_unlock(&res->spinlock); 3359 return; 3360 } 3361 3362 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3363 res->migration_pending = 0; 3364 res->state |= DLM_LOCK_RES_MIGRATING; 3365 spin_unlock(&res->spinlock); 3366 wake_up(&res->wq); 3367 wake_up(&dlm->migration_wq); 3368 } 3369 3370 void dlm_force_free_mles(struct dlm_ctxt *dlm) 3371 { 3372 int i; 3373 struct hlist_head *bucket; 3374 struct dlm_master_list_entry *mle; 3375 struct hlist_node *tmp; 3376 3377 /* 3378 * We notified all other nodes that we are exiting the domain and 3379 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still 3380 * around we force free them and wake any processes that are waiting 3381 * on the mles 3382 */ 3383 spin_lock(&dlm->spinlock); 3384 spin_lock(&dlm->master_lock); 3385 3386 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); 3387 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); 3388 3389 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3390 bucket = dlm_master_hash(dlm, i); 3391 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { 3392 if (mle->type != DLM_MLE_BLOCK) { 3393 mlog(ML_ERROR, "bad mle: %p\n", mle); 3394 dlm_print_one_mle(mle); 3395 } 3396 atomic_set(&mle->woken, 1); 3397 wake_up(&mle->wq); 3398 3399 __dlm_unlink_mle(dlm, mle); 3400 __dlm_mle_detach_hb_events(dlm, mle); 3401 __dlm_put_mle(mle); 3402 } 3403 } 3404 spin_unlock(&dlm->master_lock); 3405 spin_unlock(&dlm->spinlock); 3406 } 3407