1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmmod.c 5 * 6 * standalone DLM module 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/utsname.h> 34 #include <linux/init.h> 35 #include <linux/sysctl.h> 36 #include <linux/random.h> 37 #include <linux/blkdev.h> 38 #include <linux/socket.h> 39 #include <linux/inet.h> 40 #include <linux/spinlock.h> 41 #include <linux/delay.h> 42 43 44 #include "cluster/heartbeat.h" 45 #include "cluster/nodemanager.h" 46 #include "cluster/tcp.h" 47 48 #include "dlmapi.h" 49 #include "dlmcommon.h" 50 #include "dlmdomain.h" 51 #include "dlmdebug.h" 52 53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) 54 #include "cluster/masklog.h" 55 56 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 57 struct dlm_master_list_entry *mle, 58 struct o2nm_node *node, 59 int idx); 60 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 61 struct dlm_master_list_entry *mle, 62 struct o2nm_node *node, 63 int idx); 64 65 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); 66 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 67 struct dlm_lock_resource *res, 68 void *nodemap, u32 flags); 69 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); 70 71 static inline int dlm_mle_equal(struct dlm_ctxt *dlm, 72 struct dlm_master_list_entry *mle, 73 const char *name, 74 unsigned int namelen) 75 { 76 struct dlm_lock_resource *res; 77 78 if (dlm != mle->dlm) 79 return 0; 80 81 if (mle->type == DLM_MLE_BLOCK || 82 mle->type == DLM_MLE_MIGRATION) { 83 if (namelen != mle->u.name.len || 84 memcmp(name, mle->u.name.name, namelen)!=0) 85 return 0; 86 } else { 87 res = mle->u.res; 88 if (namelen != res->lockname.len || 89 memcmp(res->lockname.name, name, namelen) != 0) 90 return 0; 91 } 92 return 1; 93 } 94 95 static struct kmem_cache *dlm_lockres_cache = NULL; 96 static struct kmem_cache *dlm_lockname_cache = NULL; 97 static struct kmem_cache *dlm_mle_cache = NULL; 98 99 static void dlm_mle_release(struct kref *kref); 100 static void dlm_init_mle(struct dlm_master_list_entry *mle, 101 enum dlm_mle_type type, 102 struct dlm_ctxt *dlm, 103 struct dlm_lock_resource *res, 104 const char *name, 105 unsigned int namelen); 106 static void dlm_put_mle(struct dlm_master_list_entry *mle); 107 static void __dlm_put_mle(struct dlm_master_list_entry *mle); 108 static int dlm_find_mle(struct dlm_ctxt *dlm, 109 struct dlm_master_list_entry **mle, 110 char *name, unsigned int namelen); 111 112 static int dlm_do_master_request(struct dlm_lock_resource *res, 113 struct dlm_master_list_entry *mle, int to); 114 115 116 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 117 struct dlm_lock_resource *res, 118 struct dlm_master_list_entry *mle, 119 int *blocked); 120 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 121 struct dlm_lock_resource *res, 122 struct dlm_master_list_entry *mle, 123 int blocked); 124 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 125 struct dlm_lock_resource *res, 126 struct dlm_master_list_entry *mle, 127 struct dlm_master_list_entry **oldmle, 128 const char *name, unsigned int namelen, 129 u8 new_master, u8 master); 130 131 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 132 struct dlm_lock_resource *res); 133 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 134 struct dlm_lock_resource *res); 135 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 136 struct dlm_lock_resource *res, 137 u8 target); 138 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 139 struct dlm_lock_resource *res); 140 141 142 int dlm_is_host_down(int errno) 143 { 144 switch (errno) { 145 case -EBADF: 146 case -ECONNREFUSED: 147 case -ENOTCONN: 148 case -ECONNRESET: 149 case -EPIPE: 150 case -EHOSTDOWN: 151 case -EHOSTUNREACH: 152 case -ETIMEDOUT: 153 case -ECONNABORTED: 154 case -ENETDOWN: 155 case -ENETUNREACH: 156 case -ENETRESET: 157 case -ESHUTDOWN: 158 case -ENOPROTOOPT: 159 case -EINVAL: /* if returned from our tcp code, 160 this means there is no socket */ 161 return 1; 162 } 163 return 0; 164 } 165 166 167 /* 168 * MASTER LIST FUNCTIONS 169 */ 170 171 172 /* 173 * regarding master list entries and heartbeat callbacks: 174 * 175 * in order to avoid sleeping and allocation that occurs in 176 * heartbeat, master list entries are simply attached to the 177 * dlm's established heartbeat callbacks. the mle is attached 178 * when it is created, and since the dlm->spinlock is held at 179 * that time, any heartbeat event will be properly discovered 180 * by the mle. the mle needs to be detached from the 181 * dlm->mle_hb_events list as soon as heartbeat events are no 182 * longer useful to the mle, and before the mle is freed. 183 * 184 * as a general rule, heartbeat events are no longer needed by 185 * the mle once an "answer" regarding the lock master has been 186 * received. 187 */ 188 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, 189 struct dlm_master_list_entry *mle) 190 { 191 assert_spin_locked(&dlm->spinlock); 192 193 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); 194 } 195 196 197 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 198 struct dlm_master_list_entry *mle) 199 { 200 if (!list_empty(&mle->hb_events)) 201 list_del_init(&mle->hb_events); 202 } 203 204 205 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 206 struct dlm_master_list_entry *mle) 207 { 208 spin_lock(&dlm->spinlock); 209 __dlm_mle_detach_hb_events(dlm, mle); 210 spin_unlock(&dlm->spinlock); 211 } 212 213 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) 214 { 215 struct dlm_ctxt *dlm; 216 dlm = mle->dlm; 217 218 assert_spin_locked(&dlm->spinlock); 219 assert_spin_locked(&dlm->master_lock); 220 mle->inuse++; 221 kref_get(&mle->mle_refs); 222 } 223 224 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) 225 { 226 struct dlm_ctxt *dlm; 227 dlm = mle->dlm; 228 229 spin_lock(&dlm->spinlock); 230 spin_lock(&dlm->master_lock); 231 mle->inuse--; 232 __dlm_put_mle(mle); 233 spin_unlock(&dlm->master_lock); 234 spin_unlock(&dlm->spinlock); 235 236 } 237 238 /* remove from list and free */ 239 static void __dlm_put_mle(struct dlm_master_list_entry *mle) 240 { 241 struct dlm_ctxt *dlm; 242 dlm = mle->dlm; 243 244 assert_spin_locked(&dlm->spinlock); 245 assert_spin_locked(&dlm->master_lock); 246 if (!atomic_read(&mle->mle_refs.refcount)) { 247 /* this may or may not crash, but who cares. 248 * it's a BUG. */ 249 mlog(ML_ERROR, "bad mle: %p\n", mle); 250 dlm_print_one_mle(mle); 251 BUG(); 252 } else 253 kref_put(&mle->mle_refs, dlm_mle_release); 254 } 255 256 257 /* must not have any spinlocks coming in */ 258 static void dlm_put_mle(struct dlm_master_list_entry *mle) 259 { 260 struct dlm_ctxt *dlm; 261 dlm = mle->dlm; 262 263 spin_lock(&dlm->spinlock); 264 spin_lock(&dlm->master_lock); 265 __dlm_put_mle(mle); 266 spin_unlock(&dlm->master_lock); 267 spin_unlock(&dlm->spinlock); 268 } 269 270 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) 271 { 272 kref_get(&mle->mle_refs); 273 } 274 275 static void dlm_init_mle(struct dlm_master_list_entry *mle, 276 enum dlm_mle_type type, 277 struct dlm_ctxt *dlm, 278 struct dlm_lock_resource *res, 279 const char *name, 280 unsigned int namelen) 281 { 282 assert_spin_locked(&dlm->spinlock); 283 284 mle->dlm = dlm; 285 mle->type = type; 286 INIT_LIST_HEAD(&mle->list); 287 INIT_LIST_HEAD(&mle->hb_events); 288 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 289 spin_lock_init(&mle->spinlock); 290 init_waitqueue_head(&mle->wq); 291 atomic_set(&mle->woken, 0); 292 kref_init(&mle->mle_refs); 293 memset(mle->response_map, 0, sizeof(mle->response_map)); 294 mle->master = O2NM_MAX_NODES; 295 mle->new_master = O2NM_MAX_NODES; 296 mle->inuse = 0; 297 298 if (mle->type == DLM_MLE_MASTER) { 299 BUG_ON(!res); 300 mle->u.res = res; 301 } else if (mle->type == DLM_MLE_BLOCK) { 302 BUG_ON(!name); 303 memcpy(mle->u.name.name, name, namelen); 304 mle->u.name.len = namelen; 305 } else /* DLM_MLE_MIGRATION */ { 306 BUG_ON(!name); 307 memcpy(mle->u.name.name, name, namelen); 308 mle->u.name.len = namelen; 309 } 310 311 /* copy off the node_map and register hb callbacks on our copy */ 312 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); 313 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); 314 clear_bit(dlm->node_num, mle->vote_map); 315 clear_bit(dlm->node_num, mle->node_map); 316 317 /* attach the mle to the domain node up/down events */ 318 __dlm_mle_attach_hb_events(dlm, mle); 319 } 320 321 322 /* returns 1 if found, 0 if not */ 323 static int dlm_find_mle(struct dlm_ctxt *dlm, 324 struct dlm_master_list_entry **mle, 325 char *name, unsigned int namelen) 326 { 327 struct dlm_master_list_entry *tmpmle; 328 329 assert_spin_locked(&dlm->master_lock); 330 331 list_for_each_entry(tmpmle, &dlm->master_list, list) { 332 if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) 333 continue; 334 dlm_get_mle(tmpmle); 335 *mle = tmpmle; 336 return 1; 337 } 338 return 0; 339 } 340 341 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) 342 { 343 struct dlm_master_list_entry *mle; 344 345 assert_spin_locked(&dlm->spinlock); 346 347 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 348 if (node_up) 349 dlm_mle_node_up(dlm, mle, NULL, idx); 350 else 351 dlm_mle_node_down(dlm, mle, NULL, idx); 352 } 353 } 354 355 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 356 struct dlm_master_list_entry *mle, 357 struct o2nm_node *node, int idx) 358 { 359 spin_lock(&mle->spinlock); 360 361 if (!test_bit(idx, mle->node_map)) 362 mlog(0, "node %u already removed from nodemap!\n", idx); 363 else 364 clear_bit(idx, mle->node_map); 365 366 spin_unlock(&mle->spinlock); 367 } 368 369 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 370 struct dlm_master_list_entry *mle, 371 struct o2nm_node *node, int idx) 372 { 373 spin_lock(&mle->spinlock); 374 375 if (test_bit(idx, mle->node_map)) 376 mlog(0, "node %u already in node map!\n", idx); 377 else 378 set_bit(idx, mle->node_map); 379 380 spin_unlock(&mle->spinlock); 381 } 382 383 384 int dlm_init_mle_cache(void) 385 { 386 dlm_mle_cache = kmem_cache_create("o2dlm_mle", 387 sizeof(struct dlm_master_list_entry), 388 0, SLAB_HWCACHE_ALIGN, 389 NULL); 390 if (dlm_mle_cache == NULL) 391 return -ENOMEM; 392 return 0; 393 } 394 395 void dlm_destroy_mle_cache(void) 396 { 397 if (dlm_mle_cache) 398 kmem_cache_destroy(dlm_mle_cache); 399 } 400 401 static void dlm_mle_release(struct kref *kref) 402 { 403 struct dlm_master_list_entry *mle; 404 struct dlm_ctxt *dlm; 405 406 mlog_entry_void(); 407 408 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 409 dlm = mle->dlm; 410 411 if (mle->type != DLM_MLE_MASTER) { 412 mlog(0, "calling mle_release for %.*s, type %d\n", 413 mle->u.name.len, mle->u.name.name, mle->type); 414 } else { 415 mlog(0, "calling mle_release for %.*s, type %d\n", 416 mle->u.res->lockname.len, 417 mle->u.res->lockname.name, mle->type); 418 } 419 assert_spin_locked(&dlm->spinlock); 420 assert_spin_locked(&dlm->master_lock); 421 422 /* remove from list if not already */ 423 if (!list_empty(&mle->list)) 424 list_del_init(&mle->list); 425 426 /* detach the mle from the domain node up/down events */ 427 __dlm_mle_detach_hb_events(dlm, mle); 428 429 /* NOTE: kfree under spinlock here. 430 * if this is bad, we can move this to a freelist. */ 431 kmem_cache_free(dlm_mle_cache, mle); 432 } 433 434 435 /* 436 * LOCK RESOURCE FUNCTIONS 437 */ 438 439 int dlm_init_master_caches(void) 440 { 441 dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", 442 sizeof(struct dlm_lock_resource), 443 0, SLAB_HWCACHE_ALIGN, NULL); 444 if (!dlm_lockres_cache) 445 goto bail; 446 447 dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", 448 DLM_LOCKID_NAME_MAX, 0, 449 SLAB_HWCACHE_ALIGN, NULL); 450 if (!dlm_lockname_cache) 451 goto bail; 452 453 return 0; 454 bail: 455 dlm_destroy_master_caches(); 456 return -ENOMEM; 457 } 458 459 void dlm_destroy_master_caches(void) 460 { 461 if (dlm_lockname_cache) 462 kmem_cache_destroy(dlm_lockname_cache); 463 464 if (dlm_lockres_cache) 465 kmem_cache_destroy(dlm_lockres_cache); 466 } 467 468 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm, 469 struct dlm_lock_resource *res, 470 u8 owner) 471 { 472 assert_spin_locked(&res->spinlock); 473 474 mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner); 475 476 if (owner == dlm->node_num) 477 atomic_inc(&dlm->local_resources); 478 else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN) 479 atomic_inc(&dlm->unknown_resources); 480 else 481 atomic_inc(&dlm->remote_resources); 482 483 res->owner = owner; 484 } 485 486 void dlm_change_lockres_owner(struct dlm_ctxt *dlm, 487 struct dlm_lock_resource *res, u8 owner) 488 { 489 assert_spin_locked(&res->spinlock); 490 491 if (owner == res->owner) 492 return; 493 494 if (res->owner == dlm->node_num) 495 atomic_dec(&dlm->local_resources); 496 else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) 497 atomic_dec(&dlm->unknown_resources); 498 else 499 atomic_dec(&dlm->remote_resources); 500 501 dlm_set_lockres_owner(dlm, res, owner); 502 } 503 504 505 static void dlm_lockres_release(struct kref *kref) 506 { 507 struct dlm_lock_resource *res; 508 509 res = container_of(kref, struct dlm_lock_resource, refs); 510 511 /* This should not happen -- all lockres' have a name 512 * associated with them at init time. */ 513 BUG_ON(!res->lockname.name); 514 515 mlog(0, "destroying lockres %.*s\n", res->lockname.len, 516 res->lockname.name); 517 518 if (!list_empty(&res->tracking)) 519 list_del_init(&res->tracking); 520 else { 521 mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n", 522 res->lockname.len, res->lockname.name); 523 dlm_print_one_lock_resource(res); 524 } 525 526 if (!hlist_unhashed(&res->hash_node) || 527 !list_empty(&res->granted) || 528 !list_empty(&res->converting) || 529 !list_empty(&res->blocked) || 530 !list_empty(&res->dirty) || 531 !list_empty(&res->recovering) || 532 !list_empty(&res->purge)) { 533 mlog(ML_ERROR, 534 "Going to BUG for resource %.*s." 535 " We're on a list! [%c%c%c%c%c%c%c]\n", 536 res->lockname.len, res->lockname.name, 537 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', 538 !list_empty(&res->granted) ? 'G' : ' ', 539 !list_empty(&res->converting) ? 'C' : ' ', 540 !list_empty(&res->blocked) ? 'B' : ' ', 541 !list_empty(&res->dirty) ? 'D' : ' ', 542 !list_empty(&res->recovering) ? 'R' : ' ', 543 !list_empty(&res->purge) ? 'P' : ' '); 544 545 dlm_print_one_lock_resource(res); 546 } 547 548 /* By the time we're ready to blow this guy away, we shouldn't 549 * be on any lists. */ 550 BUG_ON(!hlist_unhashed(&res->hash_node)); 551 BUG_ON(!list_empty(&res->granted)); 552 BUG_ON(!list_empty(&res->converting)); 553 BUG_ON(!list_empty(&res->blocked)); 554 BUG_ON(!list_empty(&res->dirty)); 555 BUG_ON(!list_empty(&res->recovering)); 556 BUG_ON(!list_empty(&res->purge)); 557 558 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); 559 560 kmem_cache_free(dlm_lockres_cache, res); 561 } 562 563 void dlm_lockres_put(struct dlm_lock_resource *res) 564 { 565 kref_put(&res->refs, dlm_lockres_release); 566 } 567 568 static void dlm_init_lockres(struct dlm_ctxt *dlm, 569 struct dlm_lock_resource *res, 570 const char *name, unsigned int namelen) 571 { 572 char *qname; 573 574 /* If we memset here, we lose our reference to the kmalloc'd 575 * res->lockname.name, so be sure to init every field 576 * correctly! */ 577 578 qname = (char *) res->lockname.name; 579 memcpy(qname, name, namelen); 580 581 res->lockname.len = namelen; 582 res->lockname.hash = dlm_lockid_hash(name, namelen); 583 584 init_waitqueue_head(&res->wq); 585 spin_lock_init(&res->spinlock); 586 INIT_HLIST_NODE(&res->hash_node); 587 INIT_LIST_HEAD(&res->granted); 588 INIT_LIST_HEAD(&res->converting); 589 INIT_LIST_HEAD(&res->blocked); 590 INIT_LIST_HEAD(&res->dirty); 591 INIT_LIST_HEAD(&res->recovering); 592 INIT_LIST_HEAD(&res->purge); 593 INIT_LIST_HEAD(&res->tracking); 594 atomic_set(&res->asts_reserved, 0); 595 res->migration_pending = 0; 596 res->inflight_locks = 0; 597 598 kref_init(&res->refs); 599 600 /* just for consistency */ 601 spin_lock(&res->spinlock); 602 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 603 spin_unlock(&res->spinlock); 604 605 res->state = DLM_LOCK_RES_IN_PROGRESS; 606 607 res->last_used = 0; 608 609 spin_lock(&dlm->spinlock); 610 list_add_tail(&res->tracking, &dlm->tracking_list); 611 spin_unlock(&dlm->spinlock); 612 613 memset(res->lvb, 0, DLM_LVB_LEN); 614 memset(res->refmap, 0, sizeof(res->refmap)); 615 } 616 617 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, 618 const char *name, 619 unsigned int namelen) 620 { 621 struct dlm_lock_resource *res = NULL; 622 623 res = (struct dlm_lock_resource *) 624 kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); 625 if (!res) 626 goto error; 627 628 res->lockname.name = (char *) 629 kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); 630 if (!res->lockname.name) 631 goto error; 632 633 dlm_init_lockres(dlm, res, name, namelen); 634 return res; 635 636 error: 637 if (res && res->lockname.name) 638 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); 639 640 if (res) 641 kmem_cache_free(dlm_lockres_cache, res); 642 return NULL; 643 } 644 645 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 646 struct dlm_lock_resource *res, 647 int new_lockres, 648 const char *file, 649 int line) 650 { 651 if (!new_lockres) 652 assert_spin_locked(&res->spinlock); 653 654 if (!test_bit(dlm->node_num, res->refmap)) { 655 BUG_ON(res->inflight_locks != 0); 656 dlm_lockres_set_refmap_bit(dlm->node_num, res); 657 } 658 res->inflight_locks++; 659 mlog(0, "%s:%.*s: inflight++: now %u\n", 660 dlm->name, res->lockname.len, res->lockname.name, 661 res->inflight_locks); 662 } 663 664 void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 665 struct dlm_lock_resource *res, 666 const char *file, 667 int line) 668 { 669 assert_spin_locked(&res->spinlock); 670 671 BUG_ON(res->inflight_locks == 0); 672 res->inflight_locks--; 673 mlog(0, "%s:%.*s: inflight--: now %u\n", 674 dlm->name, res->lockname.len, res->lockname.name, 675 res->inflight_locks); 676 if (res->inflight_locks == 0) 677 dlm_lockres_clear_refmap_bit(dlm->node_num, res); 678 wake_up(&res->wq); 679 } 680 681 /* 682 * lookup a lock resource by name. 683 * may already exist in the hashtable. 684 * lockid is null terminated 685 * 686 * if not, allocate enough for the lockres and for 687 * the temporary structure used in doing the mastering. 688 * 689 * also, do a lookup in the dlm->master_list to see 690 * if another node has begun mastering the same lock. 691 * if so, there should be a block entry in there 692 * for this name, and we should *not* attempt to master 693 * the lock here. need to wait around for that node 694 * to assert_master (or die). 695 * 696 */ 697 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, 698 const char *lockid, 699 int namelen, 700 int flags) 701 { 702 struct dlm_lock_resource *tmpres=NULL, *res=NULL; 703 struct dlm_master_list_entry *mle = NULL; 704 struct dlm_master_list_entry *alloc_mle = NULL; 705 int blocked = 0; 706 int ret, nodenum; 707 struct dlm_node_iter iter; 708 unsigned int hash; 709 int tries = 0; 710 int bit, wait_on_recovery = 0; 711 int drop_inflight_if_nonlocal = 0; 712 713 BUG_ON(!lockid); 714 715 hash = dlm_lockid_hash(lockid, namelen); 716 717 mlog(0, "get lockres %s (len %d)\n", lockid, namelen); 718 719 lookup: 720 spin_lock(&dlm->spinlock); 721 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 722 if (tmpres) { 723 int dropping_ref = 0; 724 725 spin_lock(&tmpres->spinlock); 726 if (tmpres->owner == dlm->node_num) { 727 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); 728 dlm_lockres_grab_inflight_ref(dlm, tmpres); 729 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) 730 dropping_ref = 1; 731 spin_unlock(&tmpres->spinlock); 732 spin_unlock(&dlm->spinlock); 733 734 /* wait until done messaging the master, drop our ref to allow 735 * the lockres to be purged, start over. */ 736 if (dropping_ref) { 737 spin_lock(&tmpres->spinlock); 738 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); 739 spin_unlock(&tmpres->spinlock); 740 dlm_lockres_put(tmpres); 741 tmpres = NULL; 742 goto lookup; 743 } 744 745 mlog(0, "found in hash!\n"); 746 if (res) 747 dlm_lockres_put(res); 748 res = tmpres; 749 goto leave; 750 } 751 752 if (!res) { 753 spin_unlock(&dlm->spinlock); 754 mlog(0, "allocating a new resource\n"); 755 /* nothing found and we need to allocate one. */ 756 alloc_mle = (struct dlm_master_list_entry *) 757 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 758 if (!alloc_mle) 759 goto leave; 760 res = dlm_new_lockres(dlm, lockid, namelen); 761 if (!res) 762 goto leave; 763 goto lookup; 764 } 765 766 mlog(0, "no lockres found, allocated our own: %p\n", res); 767 768 if (flags & LKM_LOCAL) { 769 /* caller knows it's safe to assume it's not mastered elsewhere 770 * DONE! return right away */ 771 spin_lock(&res->spinlock); 772 dlm_change_lockres_owner(dlm, res, dlm->node_num); 773 __dlm_insert_lockres(dlm, res); 774 dlm_lockres_grab_inflight_ref(dlm, res); 775 spin_unlock(&res->spinlock); 776 spin_unlock(&dlm->spinlock); 777 /* lockres still marked IN_PROGRESS */ 778 goto wake_waiters; 779 } 780 781 /* check master list to see if another node has started mastering it */ 782 spin_lock(&dlm->master_lock); 783 784 /* if we found a block, wait for lock to be mastered by another node */ 785 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); 786 if (blocked) { 787 int mig; 788 if (mle->type == DLM_MLE_MASTER) { 789 mlog(ML_ERROR, "master entry for nonexistent lock!\n"); 790 BUG(); 791 } 792 mig = (mle->type == DLM_MLE_MIGRATION); 793 /* if there is a migration in progress, let the migration 794 * finish before continuing. we can wait for the absence 795 * of the MIGRATION mle: either the migrate finished or 796 * one of the nodes died and the mle was cleaned up. 797 * if there is a BLOCK here, but it already has a master 798 * set, we are too late. the master does not have a ref 799 * for us in the refmap. detach the mle and drop it. 800 * either way, go back to the top and start over. */ 801 if (mig || mle->master != O2NM_MAX_NODES) { 802 BUG_ON(mig && mle->master == dlm->node_num); 803 /* we arrived too late. the master does not 804 * have a ref for us. retry. */ 805 mlog(0, "%s:%.*s: late on %s\n", 806 dlm->name, namelen, lockid, 807 mig ? "MIGRATION" : "BLOCK"); 808 spin_unlock(&dlm->master_lock); 809 spin_unlock(&dlm->spinlock); 810 811 /* master is known, detach */ 812 if (!mig) 813 dlm_mle_detach_hb_events(dlm, mle); 814 dlm_put_mle(mle); 815 mle = NULL; 816 /* this is lame, but we cant wait on either 817 * the mle or lockres waitqueue here */ 818 if (mig) 819 msleep(100); 820 goto lookup; 821 } 822 } else { 823 /* go ahead and try to master lock on this node */ 824 mle = alloc_mle; 825 /* make sure this does not get freed below */ 826 alloc_mle = NULL; 827 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); 828 set_bit(dlm->node_num, mle->maybe_map); 829 list_add(&mle->list, &dlm->master_list); 830 831 /* still holding the dlm spinlock, check the recovery map 832 * to see if there are any nodes that still need to be 833 * considered. these will not appear in the mle nodemap 834 * but they might own this lockres. wait on them. */ 835 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 836 if (bit < O2NM_MAX_NODES) { 837 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 838 "recover before lock mastery can begin\n", 839 dlm->name, namelen, (char *)lockid, bit); 840 wait_on_recovery = 1; 841 } 842 } 843 844 /* at this point there is either a DLM_MLE_BLOCK or a 845 * DLM_MLE_MASTER on the master list, so it's safe to add the 846 * lockres to the hashtable. anyone who finds the lock will 847 * still have to wait on the IN_PROGRESS. */ 848 849 /* finally add the lockres to its hash bucket */ 850 __dlm_insert_lockres(dlm, res); 851 /* since this lockres is new it doesnt not require the spinlock */ 852 dlm_lockres_grab_inflight_ref_new(dlm, res); 853 854 /* if this node does not become the master make sure to drop 855 * this inflight reference below */ 856 drop_inflight_if_nonlocal = 1; 857 858 /* get an extra ref on the mle in case this is a BLOCK 859 * if so, the creator of the BLOCK may try to put the last 860 * ref at this time in the assert master handler, so we 861 * need an extra one to keep from a bad ptr deref. */ 862 dlm_get_mle_inuse(mle); 863 spin_unlock(&dlm->master_lock); 864 spin_unlock(&dlm->spinlock); 865 866 redo_request: 867 while (wait_on_recovery) { 868 /* any cluster changes that occurred after dropping the 869 * dlm spinlock would be detectable be a change on the mle, 870 * so we only need to clear out the recovery map once. */ 871 if (dlm_is_recovery_lock(lockid, namelen)) { 872 mlog(ML_NOTICE, "%s: recovery map is not empty, but " 873 "must master $RECOVERY lock now\n", dlm->name); 874 if (!dlm_pre_master_reco_lockres(dlm, res)) 875 wait_on_recovery = 0; 876 else { 877 mlog(0, "%s: waiting 500ms for heartbeat state " 878 "change\n", dlm->name); 879 msleep(500); 880 } 881 continue; 882 } 883 884 dlm_kick_recovery_thread(dlm); 885 msleep(1000); 886 dlm_wait_for_recovery(dlm); 887 888 spin_lock(&dlm->spinlock); 889 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 890 if (bit < O2NM_MAX_NODES) { 891 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 892 "recover before lock mastery can begin\n", 893 dlm->name, namelen, (char *)lockid, bit); 894 wait_on_recovery = 1; 895 } else 896 wait_on_recovery = 0; 897 spin_unlock(&dlm->spinlock); 898 899 if (wait_on_recovery) 900 dlm_wait_for_node_recovery(dlm, bit, 10000); 901 } 902 903 /* must wait for lock to be mastered elsewhere */ 904 if (blocked) 905 goto wait; 906 907 ret = -EINVAL; 908 dlm_node_iter_init(mle->vote_map, &iter); 909 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 910 ret = dlm_do_master_request(res, mle, nodenum); 911 if (ret < 0) 912 mlog_errno(ret); 913 if (mle->master != O2NM_MAX_NODES) { 914 /* found a master ! */ 915 if (mle->master <= nodenum) 916 break; 917 /* if our master request has not reached the master 918 * yet, keep going until it does. this is how the 919 * master will know that asserts are needed back to 920 * the lower nodes. */ 921 mlog(0, "%s:%.*s: requests only up to %u but master " 922 "is %u, keep going\n", dlm->name, namelen, 923 lockid, nodenum, mle->master); 924 } 925 } 926 927 wait: 928 /* keep going until the response map includes all nodes */ 929 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 930 if (ret < 0) { 931 wait_on_recovery = 1; 932 mlog(0, "%s:%.*s: node map changed, redo the " 933 "master request now, blocked=%d\n", 934 dlm->name, res->lockname.len, 935 res->lockname.name, blocked); 936 if (++tries > 20) { 937 mlog(ML_ERROR, "%s:%.*s: spinning on " 938 "dlm_wait_for_lock_mastery, blocked=%d\n", 939 dlm->name, res->lockname.len, 940 res->lockname.name, blocked); 941 dlm_print_one_lock_resource(res); 942 dlm_print_one_mle(mle); 943 tries = 0; 944 } 945 goto redo_request; 946 } 947 948 mlog(0, "lockres mastered by %u\n", res->owner); 949 /* make sure we never continue without this */ 950 BUG_ON(res->owner == O2NM_MAX_NODES); 951 952 /* master is known, detach if not already detached */ 953 dlm_mle_detach_hb_events(dlm, mle); 954 dlm_put_mle(mle); 955 /* put the extra ref */ 956 dlm_put_mle_inuse(mle); 957 958 wake_waiters: 959 spin_lock(&res->spinlock); 960 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal) 961 dlm_lockres_drop_inflight_ref(dlm, res); 962 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 963 spin_unlock(&res->spinlock); 964 wake_up(&res->wq); 965 966 leave: 967 /* need to free the unused mle */ 968 if (alloc_mle) 969 kmem_cache_free(dlm_mle_cache, alloc_mle); 970 971 return res; 972 } 973 974 975 #define DLM_MASTERY_TIMEOUT_MS 5000 976 977 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 978 struct dlm_lock_resource *res, 979 struct dlm_master_list_entry *mle, 980 int *blocked) 981 { 982 u8 m; 983 int ret, bit; 984 int map_changed, voting_done; 985 int assert, sleep; 986 987 recheck: 988 ret = 0; 989 assert = 0; 990 991 /* check if another node has already become the owner */ 992 spin_lock(&res->spinlock); 993 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 994 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, 995 res->lockname.len, res->lockname.name, res->owner); 996 spin_unlock(&res->spinlock); 997 /* this will cause the master to re-assert across 998 * the whole cluster, freeing up mles */ 999 if (res->owner != dlm->node_num) { 1000 ret = dlm_do_master_request(res, mle, res->owner); 1001 if (ret < 0) { 1002 /* give recovery a chance to run */ 1003 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1004 msleep(500); 1005 goto recheck; 1006 } 1007 } 1008 ret = 0; 1009 goto leave; 1010 } 1011 spin_unlock(&res->spinlock); 1012 1013 spin_lock(&mle->spinlock); 1014 m = mle->master; 1015 map_changed = (memcmp(mle->vote_map, mle->node_map, 1016 sizeof(mle->vote_map)) != 0); 1017 voting_done = (memcmp(mle->vote_map, mle->response_map, 1018 sizeof(mle->vote_map)) == 0); 1019 1020 /* restart if we hit any errors */ 1021 if (map_changed) { 1022 int b; 1023 mlog(0, "%s: %.*s: node map changed, restarting\n", 1024 dlm->name, res->lockname.len, res->lockname.name); 1025 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1026 b = (mle->type == DLM_MLE_BLOCK); 1027 if ((*blocked && !b) || (!*blocked && b)) { 1028 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1029 dlm->name, res->lockname.len, res->lockname.name, 1030 *blocked, b); 1031 *blocked = b; 1032 } 1033 spin_unlock(&mle->spinlock); 1034 if (ret < 0) { 1035 mlog_errno(ret); 1036 goto leave; 1037 } 1038 mlog(0, "%s:%.*s: restart lock mastery succeeded, " 1039 "rechecking now\n", dlm->name, res->lockname.len, 1040 res->lockname.name); 1041 goto recheck; 1042 } else { 1043 if (!voting_done) { 1044 mlog(0, "map not changed and voting not done " 1045 "for %s:%.*s\n", dlm->name, res->lockname.len, 1046 res->lockname.name); 1047 } 1048 } 1049 1050 if (m != O2NM_MAX_NODES) { 1051 /* another node has done an assert! 1052 * all done! */ 1053 sleep = 0; 1054 } else { 1055 sleep = 1; 1056 /* have all nodes responded? */ 1057 if (voting_done && !*blocked) { 1058 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 1059 if (dlm->node_num <= bit) { 1060 /* my node number is lowest. 1061 * now tell other nodes that I am 1062 * mastering this. */ 1063 mle->master = dlm->node_num; 1064 /* ref was grabbed in get_lock_resource 1065 * will be dropped in dlmlock_master */ 1066 assert = 1; 1067 sleep = 0; 1068 } 1069 /* if voting is done, but we have not received 1070 * an assert master yet, we must sleep */ 1071 } 1072 } 1073 1074 spin_unlock(&mle->spinlock); 1075 1076 /* sleep if we haven't finished voting yet */ 1077 if (sleep) { 1078 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); 1079 1080 /* 1081 if (atomic_read(&mle->mle_refs.refcount) < 2) 1082 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, 1083 atomic_read(&mle->mle_refs.refcount), 1084 res->lockname.len, res->lockname.name); 1085 */ 1086 atomic_set(&mle->woken, 0); 1087 (void)wait_event_timeout(mle->wq, 1088 (atomic_read(&mle->woken) == 1), 1089 timeo); 1090 if (res->owner == O2NM_MAX_NODES) { 1091 mlog(0, "%s:%.*s: waiting again\n", dlm->name, 1092 res->lockname.len, res->lockname.name); 1093 goto recheck; 1094 } 1095 mlog(0, "done waiting, master is %u\n", res->owner); 1096 ret = 0; 1097 goto leave; 1098 } 1099 1100 ret = 0; /* done */ 1101 if (assert) { 1102 m = dlm->node_num; 1103 mlog(0, "about to master %.*s here, this=%u\n", 1104 res->lockname.len, res->lockname.name, m); 1105 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); 1106 if (ret) { 1107 /* This is a failure in the network path, 1108 * not in the response to the assert_master 1109 * (any nonzero response is a BUG on this node). 1110 * Most likely a socket just got disconnected 1111 * due to node death. */ 1112 mlog_errno(ret); 1113 } 1114 /* no longer need to restart lock mastery. 1115 * all living nodes have been contacted. */ 1116 ret = 0; 1117 } 1118 1119 /* set the lockres owner */ 1120 spin_lock(&res->spinlock); 1121 /* mastery reference obtained either during 1122 * assert_master_handler or in get_lock_resource */ 1123 dlm_change_lockres_owner(dlm, res, m); 1124 spin_unlock(&res->spinlock); 1125 1126 leave: 1127 return ret; 1128 } 1129 1130 struct dlm_bitmap_diff_iter 1131 { 1132 int curnode; 1133 unsigned long *orig_bm; 1134 unsigned long *cur_bm; 1135 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1136 }; 1137 1138 enum dlm_node_state_change 1139 { 1140 NODE_DOWN = -1, 1141 NODE_NO_CHANGE = 0, 1142 NODE_UP 1143 }; 1144 1145 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, 1146 unsigned long *orig_bm, 1147 unsigned long *cur_bm) 1148 { 1149 unsigned long p1, p2; 1150 int i; 1151 1152 iter->curnode = -1; 1153 iter->orig_bm = orig_bm; 1154 iter->cur_bm = cur_bm; 1155 1156 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { 1157 p1 = *(iter->orig_bm + i); 1158 p2 = *(iter->cur_bm + i); 1159 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); 1160 } 1161 } 1162 1163 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, 1164 enum dlm_node_state_change *state) 1165 { 1166 int bit; 1167 1168 if (iter->curnode >= O2NM_MAX_NODES) 1169 return -ENOENT; 1170 1171 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, 1172 iter->curnode+1); 1173 if (bit >= O2NM_MAX_NODES) { 1174 iter->curnode = O2NM_MAX_NODES; 1175 return -ENOENT; 1176 } 1177 1178 /* if it was there in the original then this node died */ 1179 if (test_bit(bit, iter->orig_bm)) 1180 *state = NODE_DOWN; 1181 else 1182 *state = NODE_UP; 1183 1184 iter->curnode = bit; 1185 return bit; 1186 } 1187 1188 1189 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 1190 struct dlm_lock_resource *res, 1191 struct dlm_master_list_entry *mle, 1192 int blocked) 1193 { 1194 struct dlm_bitmap_diff_iter bdi; 1195 enum dlm_node_state_change sc; 1196 int node; 1197 int ret = 0; 1198 1199 mlog(0, "something happened such that the " 1200 "master process may need to be restarted!\n"); 1201 1202 assert_spin_locked(&mle->spinlock); 1203 1204 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); 1205 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1206 while (node >= 0) { 1207 if (sc == NODE_UP) { 1208 /* a node came up. clear any old vote from 1209 * the response map and set it in the vote map 1210 * then restart the mastery. */ 1211 mlog(ML_NOTICE, "node %d up while restarting\n", node); 1212 1213 /* redo the master request, but only for the new node */ 1214 mlog(0, "sending request to new node\n"); 1215 clear_bit(node, mle->response_map); 1216 set_bit(node, mle->vote_map); 1217 } else { 1218 mlog(ML_ERROR, "node down! %d\n", node); 1219 if (blocked) { 1220 int lowest = find_next_bit(mle->maybe_map, 1221 O2NM_MAX_NODES, 0); 1222 1223 /* act like it was never there */ 1224 clear_bit(node, mle->maybe_map); 1225 1226 if (node == lowest) { 1227 mlog(0, "expected master %u died" 1228 " while this node was blocked " 1229 "waiting on it!\n", node); 1230 lowest = find_next_bit(mle->maybe_map, 1231 O2NM_MAX_NODES, 1232 lowest+1); 1233 if (lowest < O2NM_MAX_NODES) { 1234 mlog(0, "%s:%.*s:still " 1235 "blocked. waiting on %u " 1236 "now\n", dlm->name, 1237 res->lockname.len, 1238 res->lockname.name, 1239 lowest); 1240 } else { 1241 /* mle is an MLE_BLOCK, but 1242 * there is now nothing left to 1243 * block on. we need to return 1244 * all the way back out and try 1245 * again with an MLE_MASTER. 1246 * dlm_do_local_recovery_cleanup 1247 * has already run, so the mle 1248 * refcount is ok */ 1249 mlog(0, "%s:%.*s: no " 1250 "longer blocking. try to " 1251 "master this here\n", 1252 dlm->name, 1253 res->lockname.len, 1254 res->lockname.name); 1255 mle->type = DLM_MLE_MASTER; 1256 mle->u.res = res; 1257 } 1258 } 1259 } 1260 1261 /* now blank out everything, as if we had never 1262 * contacted anyone */ 1263 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 1264 memset(mle->response_map, 0, sizeof(mle->response_map)); 1265 /* reset the vote_map to the current node_map */ 1266 memcpy(mle->vote_map, mle->node_map, 1267 sizeof(mle->node_map)); 1268 /* put myself into the maybe map */ 1269 if (mle->type != DLM_MLE_BLOCK) 1270 set_bit(dlm->node_num, mle->maybe_map); 1271 } 1272 ret = -EAGAIN; 1273 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1274 } 1275 return ret; 1276 } 1277 1278 1279 /* 1280 * DLM_MASTER_REQUEST_MSG 1281 * 1282 * returns: 0 on success, 1283 * -errno on a network error 1284 * 1285 * on error, the caller should assume the target node is "dead" 1286 * 1287 */ 1288 1289 static int dlm_do_master_request(struct dlm_lock_resource *res, 1290 struct dlm_master_list_entry *mle, int to) 1291 { 1292 struct dlm_ctxt *dlm = mle->dlm; 1293 struct dlm_master_request request; 1294 int ret, response=0, resend; 1295 1296 memset(&request, 0, sizeof(request)); 1297 request.node_idx = dlm->node_num; 1298 1299 BUG_ON(mle->type == DLM_MLE_MIGRATION); 1300 1301 if (mle->type != DLM_MLE_MASTER) { 1302 request.namelen = mle->u.name.len; 1303 memcpy(request.name, mle->u.name.name, request.namelen); 1304 } else { 1305 request.namelen = mle->u.res->lockname.len; 1306 memcpy(request.name, mle->u.res->lockname.name, 1307 request.namelen); 1308 } 1309 1310 again: 1311 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, 1312 sizeof(request), to, &response); 1313 if (ret < 0) { 1314 if (ret == -ESRCH) { 1315 /* should never happen */ 1316 mlog(ML_ERROR, "TCP stack not ready!\n"); 1317 BUG(); 1318 } else if (ret == -EINVAL) { 1319 mlog(ML_ERROR, "bad args passed to o2net!\n"); 1320 BUG(); 1321 } else if (ret == -ENOMEM) { 1322 mlog(ML_ERROR, "out of memory while trying to send " 1323 "network message! retrying\n"); 1324 /* this is totally crude */ 1325 msleep(50); 1326 goto again; 1327 } else if (!dlm_is_host_down(ret)) { 1328 /* not a network error. bad. */ 1329 mlog_errno(ret); 1330 mlog(ML_ERROR, "unhandled error!"); 1331 BUG(); 1332 } 1333 /* all other errors should be network errors, 1334 * and likely indicate node death */ 1335 mlog(ML_ERROR, "link to %d went down!\n", to); 1336 goto out; 1337 } 1338 1339 ret = 0; 1340 resend = 0; 1341 spin_lock(&mle->spinlock); 1342 switch (response) { 1343 case DLM_MASTER_RESP_YES: 1344 set_bit(to, mle->response_map); 1345 mlog(0, "node %u is the master, response=YES\n", to); 1346 mlog(0, "%s:%.*s: master node %u now knows I have a " 1347 "reference\n", dlm->name, res->lockname.len, 1348 res->lockname.name, to); 1349 mle->master = to; 1350 break; 1351 case DLM_MASTER_RESP_NO: 1352 mlog(0, "node %u not master, response=NO\n", to); 1353 set_bit(to, mle->response_map); 1354 break; 1355 case DLM_MASTER_RESP_MAYBE: 1356 mlog(0, "node %u not master, response=MAYBE\n", to); 1357 set_bit(to, mle->response_map); 1358 set_bit(to, mle->maybe_map); 1359 break; 1360 case DLM_MASTER_RESP_ERROR: 1361 mlog(0, "node %u hit an error, resending\n", to); 1362 resend = 1; 1363 response = 0; 1364 break; 1365 default: 1366 mlog(ML_ERROR, "bad response! %u\n", response); 1367 BUG(); 1368 } 1369 spin_unlock(&mle->spinlock); 1370 if (resend) { 1371 /* this is also totally crude */ 1372 msleep(50); 1373 goto again; 1374 } 1375 1376 out: 1377 return ret; 1378 } 1379 1380 /* 1381 * locks that can be taken here: 1382 * dlm->spinlock 1383 * res->spinlock 1384 * mle->spinlock 1385 * dlm->master_list 1386 * 1387 * if possible, TRIM THIS DOWN!!! 1388 */ 1389 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, 1390 void **ret_data) 1391 { 1392 u8 response = DLM_MASTER_RESP_MAYBE; 1393 struct dlm_ctxt *dlm = data; 1394 struct dlm_lock_resource *res = NULL; 1395 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1396 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1397 char *name; 1398 unsigned int namelen, hash; 1399 int found, ret; 1400 int set_maybe; 1401 int dispatch_assert = 0; 1402 1403 if (!dlm_grab(dlm)) 1404 return DLM_MASTER_RESP_NO; 1405 1406 if (!dlm_domain_fully_joined(dlm)) { 1407 response = DLM_MASTER_RESP_NO; 1408 goto send_response; 1409 } 1410 1411 name = request->name; 1412 namelen = request->namelen; 1413 hash = dlm_lockid_hash(name, namelen); 1414 1415 if (namelen > DLM_LOCKID_NAME_MAX) { 1416 response = DLM_IVBUFLEN; 1417 goto send_response; 1418 } 1419 1420 way_up_top: 1421 spin_lock(&dlm->spinlock); 1422 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1423 if (res) { 1424 spin_unlock(&dlm->spinlock); 1425 1426 /* take care of the easy cases up front */ 1427 spin_lock(&res->spinlock); 1428 if (res->state & (DLM_LOCK_RES_RECOVERING| 1429 DLM_LOCK_RES_MIGRATING)) { 1430 spin_unlock(&res->spinlock); 1431 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1432 "being recovered/migrated\n"); 1433 response = DLM_MASTER_RESP_ERROR; 1434 if (mle) 1435 kmem_cache_free(dlm_mle_cache, mle); 1436 goto send_response; 1437 } 1438 1439 if (res->owner == dlm->node_num) { 1440 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1441 dlm->name, namelen, name, request->node_idx); 1442 dlm_lockres_set_refmap_bit(request->node_idx, res); 1443 spin_unlock(&res->spinlock); 1444 response = DLM_MASTER_RESP_YES; 1445 if (mle) 1446 kmem_cache_free(dlm_mle_cache, mle); 1447 1448 /* this node is the owner. 1449 * there is some extra work that needs to 1450 * happen now. the requesting node has 1451 * caused all nodes up to this one to 1452 * create mles. this node now needs to 1453 * go back and clean those up. */ 1454 dispatch_assert = 1; 1455 goto send_response; 1456 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1457 spin_unlock(&res->spinlock); 1458 // mlog(0, "node %u is the master\n", res->owner); 1459 response = DLM_MASTER_RESP_NO; 1460 if (mle) 1461 kmem_cache_free(dlm_mle_cache, mle); 1462 goto send_response; 1463 } 1464 1465 /* ok, there is no owner. either this node is 1466 * being blocked, or it is actively trying to 1467 * master this lock. */ 1468 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1469 mlog(ML_ERROR, "lock with no owner should be " 1470 "in-progress!\n"); 1471 BUG(); 1472 } 1473 1474 // mlog(0, "lockres is in progress...\n"); 1475 spin_lock(&dlm->master_lock); 1476 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1477 if (!found) { 1478 mlog(ML_ERROR, "no mle found for this lock!\n"); 1479 BUG(); 1480 } 1481 set_maybe = 1; 1482 spin_lock(&tmpmle->spinlock); 1483 if (tmpmle->type == DLM_MLE_BLOCK) { 1484 // mlog(0, "this node is waiting for " 1485 // "lockres to be mastered\n"); 1486 response = DLM_MASTER_RESP_NO; 1487 } else if (tmpmle->type == DLM_MLE_MIGRATION) { 1488 mlog(0, "node %u is master, but trying to migrate to " 1489 "node %u.\n", tmpmle->master, tmpmle->new_master); 1490 if (tmpmle->master == dlm->node_num) { 1491 mlog(ML_ERROR, "no owner on lockres, but this " 1492 "node is trying to migrate it to %u?!\n", 1493 tmpmle->new_master); 1494 BUG(); 1495 } else { 1496 /* the real master can respond on its own */ 1497 response = DLM_MASTER_RESP_NO; 1498 } 1499 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1500 set_maybe = 0; 1501 if (tmpmle->master == dlm->node_num) { 1502 response = DLM_MASTER_RESP_YES; 1503 /* this node will be the owner. 1504 * go back and clean the mles on any 1505 * other nodes */ 1506 dispatch_assert = 1; 1507 dlm_lockres_set_refmap_bit(request->node_idx, res); 1508 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1509 dlm->name, namelen, name, 1510 request->node_idx); 1511 } else 1512 response = DLM_MASTER_RESP_NO; 1513 } else { 1514 // mlog(0, "this node is attempting to " 1515 // "master lockres\n"); 1516 response = DLM_MASTER_RESP_MAYBE; 1517 } 1518 if (set_maybe) 1519 set_bit(request->node_idx, tmpmle->maybe_map); 1520 spin_unlock(&tmpmle->spinlock); 1521 1522 spin_unlock(&dlm->master_lock); 1523 spin_unlock(&res->spinlock); 1524 1525 /* keep the mle attached to heartbeat events */ 1526 dlm_put_mle(tmpmle); 1527 if (mle) 1528 kmem_cache_free(dlm_mle_cache, mle); 1529 goto send_response; 1530 } 1531 1532 /* 1533 * lockres doesn't exist on this node 1534 * if there is an MLE_BLOCK, return NO 1535 * if there is an MLE_MASTER, return MAYBE 1536 * otherwise, add an MLE_BLOCK, return NO 1537 */ 1538 spin_lock(&dlm->master_lock); 1539 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1540 if (!found) { 1541 /* this lockid has never been seen on this node yet */ 1542 // mlog(0, "no mle found\n"); 1543 if (!mle) { 1544 spin_unlock(&dlm->master_lock); 1545 spin_unlock(&dlm->spinlock); 1546 1547 mle = (struct dlm_master_list_entry *) 1548 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 1549 if (!mle) { 1550 response = DLM_MASTER_RESP_ERROR; 1551 mlog_errno(-ENOMEM); 1552 goto send_response; 1553 } 1554 goto way_up_top; 1555 } 1556 1557 // mlog(0, "this is second time thru, already allocated, " 1558 // "add the block.\n"); 1559 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); 1560 set_bit(request->node_idx, mle->maybe_map); 1561 list_add(&mle->list, &dlm->master_list); 1562 response = DLM_MASTER_RESP_NO; 1563 } else { 1564 // mlog(0, "mle was found\n"); 1565 set_maybe = 1; 1566 spin_lock(&tmpmle->spinlock); 1567 if (tmpmle->master == dlm->node_num) { 1568 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); 1569 BUG(); 1570 } 1571 if (tmpmle->type == DLM_MLE_BLOCK) 1572 response = DLM_MASTER_RESP_NO; 1573 else if (tmpmle->type == DLM_MLE_MIGRATION) { 1574 mlog(0, "migration mle was found (%u->%u)\n", 1575 tmpmle->master, tmpmle->new_master); 1576 /* real master can respond on its own */ 1577 response = DLM_MASTER_RESP_NO; 1578 } else 1579 response = DLM_MASTER_RESP_MAYBE; 1580 if (set_maybe) 1581 set_bit(request->node_idx, tmpmle->maybe_map); 1582 spin_unlock(&tmpmle->spinlock); 1583 } 1584 spin_unlock(&dlm->master_lock); 1585 spin_unlock(&dlm->spinlock); 1586 1587 if (found) { 1588 /* keep the mle attached to heartbeat events */ 1589 dlm_put_mle(tmpmle); 1590 } 1591 send_response: 1592 /* 1593 * __dlm_lookup_lockres() grabbed a reference to this lockres. 1594 * The reference is released by dlm_assert_master_worker() under 1595 * the call to dlm_dispatch_assert_master(). If 1596 * dlm_assert_master_worker() isn't called, we drop it here. 1597 */ 1598 if (dispatch_assert) { 1599 if (response != DLM_MASTER_RESP_YES) 1600 mlog(ML_ERROR, "invalid response %d\n", response); 1601 if (!res) { 1602 mlog(ML_ERROR, "bad lockres while trying to assert!\n"); 1603 BUG(); 1604 } 1605 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1606 dlm->node_num, res->lockname.len, res->lockname.name); 1607 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1608 DLM_ASSERT_MASTER_MLE_CLEANUP); 1609 if (ret < 0) { 1610 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1611 response = DLM_MASTER_RESP_ERROR; 1612 dlm_lockres_put(res); 1613 } 1614 } else { 1615 if (res) 1616 dlm_lockres_put(res); 1617 } 1618 1619 dlm_put(dlm); 1620 return response; 1621 } 1622 1623 /* 1624 * DLM_ASSERT_MASTER_MSG 1625 */ 1626 1627 1628 /* 1629 * NOTE: this can be used for debugging 1630 * can periodically run all locks owned by this node 1631 * and re-assert across the cluster... 1632 */ 1633 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 1634 struct dlm_lock_resource *res, 1635 void *nodemap, u32 flags) 1636 { 1637 struct dlm_assert_master assert; 1638 int to, tmpret; 1639 struct dlm_node_iter iter; 1640 int ret = 0; 1641 int reassert; 1642 const char *lockname = res->lockname.name; 1643 unsigned int namelen = res->lockname.len; 1644 1645 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 1646 1647 spin_lock(&res->spinlock); 1648 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1649 spin_unlock(&res->spinlock); 1650 1651 again: 1652 reassert = 0; 1653 1654 /* note that if this nodemap is empty, it returns 0 */ 1655 dlm_node_iter_init(nodemap, &iter); 1656 while ((to = dlm_node_iter_next(&iter)) >= 0) { 1657 int r = 0; 1658 struct dlm_master_list_entry *mle = NULL; 1659 1660 mlog(0, "sending assert master to %d (%.*s)\n", to, 1661 namelen, lockname); 1662 memset(&assert, 0, sizeof(assert)); 1663 assert.node_idx = dlm->node_num; 1664 assert.namelen = namelen; 1665 memcpy(assert.name, lockname, namelen); 1666 assert.flags = cpu_to_be32(flags); 1667 1668 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 1669 &assert, sizeof(assert), to, &r); 1670 if (tmpret < 0) { 1671 mlog(0, "assert_master returned %d!\n", tmpret); 1672 if (!dlm_is_host_down(tmpret)) { 1673 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); 1674 BUG(); 1675 } 1676 /* a node died. finish out the rest of the nodes. */ 1677 mlog(0, "link to %d went down!\n", to); 1678 /* any nonzero status return will do */ 1679 ret = tmpret; 1680 r = 0; 1681 } else if (r < 0) { 1682 /* ok, something horribly messed. kill thyself. */ 1683 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1684 "got %d.\n", namelen, lockname, to, r); 1685 spin_lock(&dlm->spinlock); 1686 spin_lock(&dlm->master_lock); 1687 if (dlm_find_mle(dlm, &mle, (char *)lockname, 1688 namelen)) { 1689 dlm_print_one_mle(mle); 1690 __dlm_put_mle(mle); 1691 } 1692 spin_unlock(&dlm->master_lock); 1693 spin_unlock(&dlm->spinlock); 1694 BUG(); 1695 } 1696 1697 if (r & DLM_ASSERT_RESPONSE_REASSERT && 1698 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { 1699 mlog(ML_ERROR, "%.*s: very strange, " 1700 "master MLE but no lockres on %u\n", 1701 namelen, lockname, to); 1702 } 1703 1704 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1705 mlog(0, "%.*s: node %u create mles on other " 1706 "nodes and requests a re-assert\n", 1707 namelen, lockname, to); 1708 reassert = 1; 1709 } 1710 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { 1711 mlog(0, "%.*s: node %u has a reference to this " 1712 "lockres, set the bit in the refmap\n", 1713 namelen, lockname, to); 1714 spin_lock(&res->spinlock); 1715 dlm_lockres_set_refmap_bit(to, res); 1716 spin_unlock(&res->spinlock); 1717 } 1718 } 1719 1720 if (reassert) 1721 goto again; 1722 1723 spin_lock(&res->spinlock); 1724 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 1725 spin_unlock(&res->spinlock); 1726 wake_up(&res->wq); 1727 1728 return ret; 1729 } 1730 1731 /* 1732 * locks that can be taken here: 1733 * dlm->spinlock 1734 * res->spinlock 1735 * mle->spinlock 1736 * dlm->master_list 1737 * 1738 * if possible, TRIM THIS DOWN!!! 1739 */ 1740 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, 1741 void **ret_data) 1742 { 1743 struct dlm_ctxt *dlm = data; 1744 struct dlm_master_list_entry *mle = NULL; 1745 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; 1746 struct dlm_lock_resource *res = NULL; 1747 char *name; 1748 unsigned int namelen, hash; 1749 u32 flags; 1750 int master_request = 0, have_lockres_ref = 0; 1751 int ret = 0; 1752 1753 if (!dlm_grab(dlm)) 1754 return 0; 1755 1756 name = assert->name; 1757 namelen = assert->namelen; 1758 hash = dlm_lockid_hash(name, namelen); 1759 flags = be32_to_cpu(assert->flags); 1760 1761 if (namelen > DLM_LOCKID_NAME_MAX) { 1762 mlog(ML_ERROR, "Invalid name length!"); 1763 goto done; 1764 } 1765 1766 spin_lock(&dlm->spinlock); 1767 1768 if (flags) 1769 mlog(0, "assert_master with flags: %u\n", flags); 1770 1771 /* find the MLE */ 1772 spin_lock(&dlm->master_lock); 1773 if (!dlm_find_mle(dlm, &mle, name, namelen)) { 1774 /* not an error, could be master just re-asserting */ 1775 mlog(0, "just got an assert_master from %u, but no " 1776 "MLE for it! (%.*s)\n", assert->node_idx, 1777 namelen, name); 1778 } else { 1779 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); 1780 if (bit >= O2NM_MAX_NODES) { 1781 /* not necessarily an error, though less likely. 1782 * could be master just re-asserting. */ 1783 mlog(0, "no bits set in the maybe_map, but %u " 1784 "is asserting! (%.*s)\n", assert->node_idx, 1785 namelen, name); 1786 } else if (bit != assert->node_idx) { 1787 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1788 mlog(0, "master %u was found, %u should " 1789 "back off\n", assert->node_idx, bit); 1790 } else { 1791 /* with the fix for bug 569, a higher node 1792 * number winning the mastery will respond 1793 * YES to mastery requests, but this node 1794 * had no way of knowing. let it pass. */ 1795 mlog(0, "%u is the lowest node, " 1796 "%u is asserting. (%.*s) %u must " 1797 "have begun after %u won.\n", bit, 1798 assert->node_idx, namelen, name, bit, 1799 assert->node_idx); 1800 } 1801 } 1802 if (mle->type == DLM_MLE_MIGRATION) { 1803 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1804 mlog(0, "%s:%.*s: got cleanup assert" 1805 " from %u for migration\n", 1806 dlm->name, namelen, name, 1807 assert->node_idx); 1808 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { 1809 mlog(0, "%s:%.*s: got unrelated assert" 1810 " from %u for migration, ignoring\n", 1811 dlm->name, namelen, name, 1812 assert->node_idx); 1813 __dlm_put_mle(mle); 1814 spin_unlock(&dlm->master_lock); 1815 spin_unlock(&dlm->spinlock); 1816 goto done; 1817 } 1818 } 1819 } 1820 spin_unlock(&dlm->master_lock); 1821 1822 /* ok everything checks out with the MLE 1823 * now check to see if there is a lockres */ 1824 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1825 if (res) { 1826 spin_lock(&res->spinlock); 1827 if (res->state & DLM_LOCK_RES_RECOVERING) { 1828 mlog(ML_ERROR, "%u asserting but %.*s is " 1829 "RECOVERING!\n", assert->node_idx, namelen, name); 1830 goto kill; 1831 } 1832 if (!mle) { 1833 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && 1834 res->owner != assert->node_idx) { 1835 mlog(ML_ERROR, "assert_master from " 1836 "%u, but current owner is " 1837 "%u! (%.*s)\n", 1838 assert->node_idx, res->owner, 1839 namelen, name); 1840 goto kill; 1841 } 1842 } else if (mle->type != DLM_MLE_MIGRATION) { 1843 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1844 /* owner is just re-asserting */ 1845 if (res->owner == assert->node_idx) { 1846 mlog(0, "owner %u re-asserting on " 1847 "lock %.*s\n", assert->node_idx, 1848 namelen, name); 1849 goto ok; 1850 } 1851 mlog(ML_ERROR, "got assert_master from " 1852 "node %u, but %u is the owner! " 1853 "(%.*s)\n", assert->node_idx, 1854 res->owner, namelen, name); 1855 goto kill; 1856 } 1857 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1858 mlog(ML_ERROR, "got assert from %u, but lock " 1859 "with no owner should be " 1860 "in-progress! (%.*s)\n", 1861 assert->node_idx, 1862 namelen, name); 1863 goto kill; 1864 } 1865 } else /* mle->type == DLM_MLE_MIGRATION */ { 1866 /* should only be getting an assert from new master */ 1867 if (assert->node_idx != mle->new_master) { 1868 mlog(ML_ERROR, "got assert from %u, but " 1869 "new master is %u, and old master " 1870 "was %u (%.*s)\n", 1871 assert->node_idx, mle->new_master, 1872 mle->master, namelen, name); 1873 goto kill; 1874 } 1875 1876 } 1877 ok: 1878 spin_unlock(&res->spinlock); 1879 } 1880 spin_unlock(&dlm->spinlock); 1881 1882 // mlog(0, "woo! got an assert_master from node %u!\n", 1883 // assert->node_idx); 1884 if (mle) { 1885 int extra_ref = 0; 1886 int nn = -1; 1887 int rr, err = 0; 1888 1889 spin_lock(&mle->spinlock); 1890 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1891 extra_ref = 1; 1892 else { 1893 /* MASTER mle: if any bits set in the response map 1894 * then the calling node needs to re-assert to clear 1895 * up nodes that this node contacted */ 1896 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1897 nn+1)) < O2NM_MAX_NODES) { 1898 if (nn != dlm->node_num && nn != assert->node_idx) 1899 master_request = 1; 1900 } 1901 } 1902 mle->master = assert->node_idx; 1903 atomic_set(&mle->woken, 1); 1904 wake_up(&mle->wq); 1905 spin_unlock(&mle->spinlock); 1906 1907 if (res) { 1908 int wake = 0; 1909 spin_lock(&res->spinlock); 1910 if (mle->type == DLM_MLE_MIGRATION) { 1911 mlog(0, "finishing off migration of lockres %.*s, " 1912 "from %u to %u\n", 1913 res->lockname.len, res->lockname.name, 1914 dlm->node_num, mle->new_master); 1915 res->state &= ~DLM_LOCK_RES_MIGRATING; 1916 wake = 1; 1917 dlm_change_lockres_owner(dlm, res, mle->new_master); 1918 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1919 } else { 1920 dlm_change_lockres_owner(dlm, res, mle->master); 1921 } 1922 spin_unlock(&res->spinlock); 1923 have_lockres_ref = 1; 1924 if (wake) 1925 wake_up(&res->wq); 1926 } 1927 1928 /* master is known, detach if not already detached. 1929 * ensures that only one assert_master call will happen 1930 * on this mle. */ 1931 spin_lock(&dlm->spinlock); 1932 spin_lock(&dlm->master_lock); 1933 1934 rr = atomic_read(&mle->mle_refs.refcount); 1935 if (mle->inuse > 0) { 1936 if (extra_ref && rr < 3) 1937 err = 1; 1938 else if (!extra_ref && rr < 2) 1939 err = 1; 1940 } else { 1941 if (extra_ref && rr < 2) 1942 err = 1; 1943 else if (!extra_ref && rr < 1) 1944 err = 1; 1945 } 1946 if (err) { 1947 mlog(ML_ERROR, "%s:%.*s: got assert master from %u " 1948 "that will mess up this node, refs=%d, extra=%d, " 1949 "inuse=%d\n", dlm->name, namelen, name, 1950 assert->node_idx, rr, extra_ref, mle->inuse); 1951 dlm_print_one_mle(mle); 1952 } 1953 list_del_init(&mle->list); 1954 __dlm_mle_detach_hb_events(dlm, mle); 1955 __dlm_put_mle(mle); 1956 if (extra_ref) { 1957 /* the assert master message now balances the extra 1958 * ref given by the master / migration request message. 1959 * if this is the last put, it will be removed 1960 * from the list. */ 1961 __dlm_put_mle(mle); 1962 } 1963 spin_unlock(&dlm->master_lock); 1964 spin_unlock(&dlm->spinlock); 1965 } else if (res) { 1966 if (res->owner != assert->node_idx) { 1967 mlog(0, "assert_master from %u, but current " 1968 "owner is %u (%.*s), no mle\n", assert->node_idx, 1969 res->owner, namelen, name); 1970 } 1971 } 1972 1973 done: 1974 ret = 0; 1975 if (res) { 1976 spin_lock(&res->spinlock); 1977 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1978 spin_unlock(&res->spinlock); 1979 *ret_data = (void *)res; 1980 } 1981 dlm_put(dlm); 1982 if (master_request) { 1983 mlog(0, "need to tell master to reassert\n"); 1984 /* positive. negative would shoot down the node. */ 1985 ret |= DLM_ASSERT_RESPONSE_REASSERT; 1986 if (!have_lockres_ref) { 1987 mlog(ML_ERROR, "strange, got assert from %u, MASTER " 1988 "mle present here for %s:%.*s, but no lockres!\n", 1989 assert->node_idx, dlm->name, namelen, name); 1990 } 1991 } 1992 if (have_lockres_ref) { 1993 /* let the master know we have a reference to the lockres */ 1994 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; 1995 mlog(0, "%s:%.*s: got assert from %u, need a ref\n", 1996 dlm->name, namelen, name, assert->node_idx); 1997 } 1998 return ret; 1999 2000 kill: 2001 /* kill the caller! */ 2002 mlog(ML_ERROR, "Bad message received from another node. Dumping state " 2003 "and killing the other node now! This node is OK and can continue.\n"); 2004 __dlm_print_one_lock_resource(res); 2005 spin_unlock(&res->spinlock); 2006 spin_unlock(&dlm->spinlock); 2007 *ret_data = (void *)res; 2008 dlm_put(dlm); 2009 return -EINVAL; 2010 } 2011 2012 void dlm_assert_master_post_handler(int status, void *data, void *ret_data) 2013 { 2014 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; 2015 2016 if (ret_data) { 2017 spin_lock(&res->spinlock); 2018 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 2019 spin_unlock(&res->spinlock); 2020 wake_up(&res->wq); 2021 dlm_lockres_put(res); 2022 } 2023 return; 2024 } 2025 2026 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 2027 struct dlm_lock_resource *res, 2028 int ignore_higher, u8 request_from, u32 flags) 2029 { 2030 struct dlm_work_item *item; 2031 item = kzalloc(sizeof(*item), GFP_NOFS); 2032 if (!item) 2033 return -ENOMEM; 2034 2035 2036 /* queue up work for dlm_assert_master_worker */ 2037 dlm_grab(dlm); /* get an extra ref for the work item */ 2038 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2039 item->u.am.lockres = res; /* already have a ref */ 2040 /* can optionally ignore node numbers higher than this node */ 2041 item->u.am.ignore_higher = ignore_higher; 2042 item->u.am.request_from = request_from; 2043 item->u.am.flags = flags; 2044 2045 if (ignore_higher) 2046 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2047 res->lockname.name); 2048 2049 spin_lock(&dlm->work_lock); 2050 list_add_tail(&item->list, &dlm->work_list); 2051 spin_unlock(&dlm->work_lock); 2052 2053 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2054 return 0; 2055 } 2056 2057 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) 2058 { 2059 struct dlm_ctxt *dlm = data; 2060 int ret = 0; 2061 struct dlm_lock_resource *res; 2062 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2063 int ignore_higher; 2064 int bit; 2065 u8 request_from; 2066 u32 flags; 2067 2068 dlm = item->dlm; 2069 res = item->u.am.lockres; 2070 ignore_higher = item->u.am.ignore_higher; 2071 request_from = item->u.am.request_from; 2072 flags = item->u.am.flags; 2073 2074 spin_lock(&dlm->spinlock); 2075 memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); 2076 spin_unlock(&dlm->spinlock); 2077 2078 clear_bit(dlm->node_num, nodemap); 2079 if (ignore_higher) { 2080 /* if is this just to clear up mles for nodes below 2081 * this node, do not send the message to the original 2082 * caller or any node number higher than this */ 2083 clear_bit(request_from, nodemap); 2084 bit = dlm->node_num; 2085 while (1) { 2086 bit = find_next_bit(nodemap, O2NM_MAX_NODES, 2087 bit+1); 2088 if (bit >= O2NM_MAX_NODES) 2089 break; 2090 clear_bit(bit, nodemap); 2091 } 2092 } 2093 2094 /* 2095 * If we're migrating this lock to someone else, we are no 2096 * longer allowed to assert out own mastery. OTOH, we need to 2097 * prevent migration from starting while we're still asserting 2098 * our dominance. The reserved ast delays migration. 2099 */ 2100 spin_lock(&res->spinlock); 2101 if (res->state & DLM_LOCK_RES_MIGRATING) { 2102 mlog(0, "Someone asked us to assert mastery, but we're " 2103 "in the middle of migration. Skipping assert, " 2104 "the new master will handle that.\n"); 2105 spin_unlock(&res->spinlock); 2106 goto put; 2107 } else 2108 __dlm_lockres_reserve_ast(res); 2109 spin_unlock(&res->spinlock); 2110 2111 /* this call now finishes out the nodemap 2112 * even if one or more nodes die */ 2113 mlog(0, "worker about to master %.*s here, this=%u\n", 2114 res->lockname.len, res->lockname.name, dlm->node_num); 2115 ret = dlm_do_assert_master(dlm, res, nodemap, flags); 2116 if (ret < 0) { 2117 /* no need to restart, we are done */ 2118 if (!dlm_is_host_down(ret)) 2119 mlog_errno(ret); 2120 } 2121 2122 /* Ok, we've asserted ourselves. Let's let migration start. */ 2123 dlm_lockres_release_ast(dlm, res); 2124 2125 put: 2126 dlm_lockres_put(res); 2127 2128 mlog(0, "finished with dlm_assert_master_worker\n"); 2129 } 2130 2131 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. 2132 * We cannot wait for node recovery to complete to begin mastering this 2133 * lockres because this lockres is used to kick off recovery! ;-) 2134 * So, do a pre-check on all living nodes to see if any of those nodes 2135 * think that $RECOVERY is currently mastered by a dead node. If so, 2136 * we wait a short time to allow that node to get notified by its own 2137 * heartbeat stack, then check again. All $RECOVERY lock resources 2138 * mastered by dead nodes are purged when the hearbeat callback is 2139 * fired, so we can know for sure that it is safe to continue once 2140 * the node returns a live node or no node. */ 2141 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2142 struct dlm_lock_resource *res) 2143 { 2144 struct dlm_node_iter iter; 2145 int nodenum; 2146 int ret = 0; 2147 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; 2148 2149 spin_lock(&dlm->spinlock); 2150 dlm_node_iter_init(dlm->domain_map, &iter); 2151 spin_unlock(&dlm->spinlock); 2152 2153 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2154 /* do not send to self */ 2155 if (nodenum == dlm->node_num) 2156 continue; 2157 ret = dlm_do_master_requery(dlm, res, nodenum, &master); 2158 if (ret < 0) { 2159 mlog_errno(ret); 2160 if (!dlm_is_host_down(ret)) 2161 BUG(); 2162 /* host is down, so answer for that node would be 2163 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 2164 ret = 0; 2165 } 2166 2167 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { 2168 /* check to see if this master is in the recovery map */ 2169 spin_lock(&dlm->spinlock); 2170 if (test_bit(master, dlm->recovery_map)) { 2171 mlog(ML_NOTICE, "%s: node %u has not seen " 2172 "node %u go down yet, and thinks the " 2173 "dead node is mastering the recovery " 2174 "lock. must wait.\n", dlm->name, 2175 nodenum, master); 2176 ret = -EAGAIN; 2177 } 2178 spin_unlock(&dlm->spinlock); 2179 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2180 master); 2181 break; 2182 } 2183 } 2184 return ret; 2185 } 2186 2187 /* 2188 * DLM_DEREF_LOCKRES_MSG 2189 */ 2190 2191 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2192 { 2193 struct dlm_deref_lockres deref; 2194 int ret = 0, r; 2195 const char *lockname; 2196 unsigned int namelen; 2197 2198 lockname = res->lockname.name; 2199 namelen = res->lockname.len; 2200 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2201 2202 mlog(0, "%s:%.*s: sending deref to %d\n", 2203 dlm->name, namelen, lockname, res->owner); 2204 memset(&deref, 0, sizeof(deref)); 2205 deref.node_idx = dlm->node_num; 2206 deref.namelen = namelen; 2207 memcpy(deref.name, lockname, namelen); 2208 2209 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2210 &deref, sizeof(deref), res->owner, &r); 2211 if (ret < 0) 2212 mlog_errno(ret); 2213 else if (r < 0) { 2214 /* BAD. other node says I did not have a ref. */ 2215 mlog(ML_ERROR,"while dropping ref on %s:%.*s " 2216 "(master=%u) got %d.\n", dlm->name, namelen, 2217 lockname, res->owner, r); 2218 dlm_print_one_lock_resource(res); 2219 BUG(); 2220 } 2221 return ret; 2222 } 2223 2224 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 2225 void **ret_data) 2226 { 2227 struct dlm_ctxt *dlm = data; 2228 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; 2229 struct dlm_lock_resource *res = NULL; 2230 char *name; 2231 unsigned int namelen; 2232 int ret = -EINVAL; 2233 u8 node; 2234 unsigned int hash; 2235 struct dlm_work_item *item; 2236 int cleared = 0; 2237 int dispatch = 0; 2238 2239 if (!dlm_grab(dlm)) 2240 return 0; 2241 2242 name = deref->name; 2243 namelen = deref->namelen; 2244 node = deref->node_idx; 2245 2246 if (namelen > DLM_LOCKID_NAME_MAX) { 2247 mlog(ML_ERROR, "Invalid name length!"); 2248 goto done; 2249 } 2250 if (deref->node_idx >= O2NM_MAX_NODES) { 2251 mlog(ML_ERROR, "Invalid node number: %u\n", node); 2252 goto done; 2253 } 2254 2255 hash = dlm_lockid_hash(name, namelen); 2256 2257 spin_lock(&dlm->spinlock); 2258 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); 2259 if (!res) { 2260 spin_unlock(&dlm->spinlock); 2261 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", 2262 dlm->name, namelen, name); 2263 goto done; 2264 } 2265 spin_unlock(&dlm->spinlock); 2266 2267 spin_lock(&res->spinlock); 2268 if (res->state & DLM_LOCK_RES_SETREF_INPROG) 2269 dispatch = 1; 2270 else { 2271 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2272 if (test_bit(node, res->refmap)) { 2273 dlm_lockres_clear_refmap_bit(node, res); 2274 cleared = 1; 2275 } 2276 } 2277 spin_unlock(&res->spinlock); 2278 2279 if (!dispatch) { 2280 if (cleared) 2281 dlm_lockres_calc_usage(dlm, res); 2282 else { 2283 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2284 "but it is already dropped!\n", dlm->name, 2285 res->lockname.len, res->lockname.name, node); 2286 dlm_print_one_lock_resource(res); 2287 } 2288 ret = 0; 2289 goto done; 2290 } 2291 2292 item = kzalloc(sizeof(*item), GFP_NOFS); 2293 if (!item) { 2294 ret = -ENOMEM; 2295 mlog_errno(ret); 2296 goto done; 2297 } 2298 2299 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); 2300 item->u.dl.deref_res = res; 2301 item->u.dl.deref_node = node; 2302 2303 spin_lock(&dlm->work_lock); 2304 list_add_tail(&item->list, &dlm->work_list); 2305 spin_unlock(&dlm->work_lock); 2306 2307 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2308 return 0; 2309 2310 done: 2311 if (res) 2312 dlm_lockres_put(res); 2313 dlm_put(dlm); 2314 2315 return ret; 2316 } 2317 2318 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) 2319 { 2320 struct dlm_ctxt *dlm; 2321 struct dlm_lock_resource *res; 2322 u8 node; 2323 u8 cleared = 0; 2324 2325 dlm = item->dlm; 2326 res = item->u.dl.deref_res; 2327 node = item->u.dl.deref_node; 2328 2329 spin_lock(&res->spinlock); 2330 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2331 if (test_bit(node, res->refmap)) { 2332 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2333 dlm_lockres_clear_refmap_bit(node, res); 2334 cleared = 1; 2335 } 2336 spin_unlock(&res->spinlock); 2337 2338 if (cleared) { 2339 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", 2340 dlm->name, res->lockname.len, res->lockname.name, node); 2341 dlm_lockres_calc_usage(dlm, res); 2342 } else { 2343 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2344 "but it is already dropped!\n", dlm->name, 2345 res->lockname.len, res->lockname.name, node); 2346 dlm_print_one_lock_resource(res); 2347 } 2348 2349 dlm_lockres_put(res); 2350 } 2351 2352 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0 2353 * if not. If 0, numlocks is set to the number of locks in the lockres. 2354 */ 2355 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, 2356 struct dlm_lock_resource *res, 2357 int *numlocks) 2358 { 2359 int ret; 2360 int i; 2361 int count = 0; 2362 struct list_head *queue; 2363 struct dlm_lock *lock; 2364 2365 assert_spin_locked(&res->spinlock); 2366 2367 ret = -EINVAL; 2368 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 2369 mlog(0, "cannot migrate lockres with unknown owner!\n"); 2370 goto leave; 2371 } 2372 2373 if (res->owner != dlm->node_num) { 2374 mlog(0, "cannot migrate lockres this node doesn't own!\n"); 2375 goto leave; 2376 } 2377 2378 ret = 0; 2379 queue = &res->granted; 2380 for (i = 0; i < 3; i++) { 2381 list_for_each_entry(lock, queue, list) { 2382 ++count; 2383 if (lock->ml.node == dlm->node_num) { 2384 mlog(0, "found a lock owned by this node still " 2385 "on the %s queue! will not migrate this " 2386 "lockres\n", (i == 0 ? "granted" : 2387 (i == 1 ? "converting" : 2388 "blocked"))); 2389 ret = -ENOTEMPTY; 2390 goto leave; 2391 } 2392 } 2393 queue++; 2394 } 2395 2396 *numlocks = count; 2397 mlog(0, "migrateable lockres having %d locks\n", *numlocks); 2398 2399 leave: 2400 return ret; 2401 } 2402 2403 /* 2404 * DLM_MIGRATE_LOCKRES 2405 */ 2406 2407 2408 static int dlm_migrate_lockres(struct dlm_ctxt *dlm, 2409 struct dlm_lock_resource *res, 2410 u8 target) 2411 { 2412 struct dlm_master_list_entry *mle = NULL; 2413 struct dlm_master_list_entry *oldmle = NULL; 2414 struct dlm_migratable_lockres *mres = NULL; 2415 int ret = 0; 2416 const char *name; 2417 unsigned int namelen; 2418 int mle_added = 0; 2419 int numlocks; 2420 int wake = 0; 2421 2422 if (!dlm_grab(dlm)) 2423 return -EINVAL; 2424 2425 name = res->lockname.name; 2426 namelen = res->lockname.len; 2427 2428 mlog(0, "migrating %.*s to %u\n", namelen, name, target); 2429 2430 /* 2431 * ensure this lockres is a proper candidate for migration 2432 */ 2433 spin_lock(&res->spinlock); 2434 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2435 if (ret < 0) { 2436 spin_unlock(&res->spinlock); 2437 goto leave; 2438 } 2439 spin_unlock(&res->spinlock); 2440 2441 /* no work to do */ 2442 if (numlocks == 0) { 2443 mlog(0, "no locks were found on this lockres! done!\n"); 2444 goto leave; 2445 } 2446 2447 /* 2448 * preallocate up front 2449 * if this fails, abort 2450 */ 2451 2452 ret = -ENOMEM; 2453 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); 2454 if (!mres) { 2455 mlog_errno(ret); 2456 goto leave; 2457 } 2458 2459 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, 2460 GFP_NOFS); 2461 if (!mle) { 2462 mlog_errno(ret); 2463 goto leave; 2464 } 2465 ret = 0; 2466 2467 /* 2468 * find a node to migrate the lockres to 2469 */ 2470 2471 mlog(0, "picking a migration node\n"); 2472 spin_lock(&dlm->spinlock); 2473 /* pick a new node */ 2474 if (!test_bit(target, dlm->domain_map) || 2475 target >= O2NM_MAX_NODES) { 2476 target = dlm_pick_migration_target(dlm, res); 2477 } 2478 mlog(0, "node %u chosen for migration\n", target); 2479 2480 if (target >= O2NM_MAX_NODES || 2481 !test_bit(target, dlm->domain_map)) { 2482 /* target chosen is not alive */ 2483 ret = -EINVAL; 2484 } 2485 2486 if (ret) { 2487 spin_unlock(&dlm->spinlock); 2488 goto fail; 2489 } 2490 2491 mlog(0, "continuing with target = %u\n", target); 2492 2493 /* 2494 * clear any existing master requests and 2495 * add the migration mle to the list 2496 */ 2497 spin_lock(&dlm->master_lock); 2498 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, 2499 namelen, target, dlm->node_num); 2500 spin_unlock(&dlm->master_lock); 2501 spin_unlock(&dlm->spinlock); 2502 2503 if (ret == -EEXIST) { 2504 mlog(0, "another process is already migrating it\n"); 2505 goto fail; 2506 } 2507 mle_added = 1; 2508 2509 /* 2510 * set the MIGRATING flag and flush asts 2511 * if we fail after this we need to re-dirty the lockres 2512 */ 2513 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { 2514 mlog(ML_ERROR, "tried to migrate %.*s to %u, but " 2515 "the target went down.\n", res->lockname.len, 2516 res->lockname.name, target); 2517 spin_lock(&res->spinlock); 2518 res->state &= ~DLM_LOCK_RES_MIGRATING; 2519 wake = 1; 2520 spin_unlock(&res->spinlock); 2521 ret = -EINVAL; 2522 } 2523 2524 fail: 2525 if (oldmle) { 2526 /* master is known, detach if not already detached */ 2527 dlm_mle_detach_hb_events(dlm, oldmle); 2528 dlm_put_mle(oldmle); 2529 } 2530 2531 if (ret < 0) { 2532 if (mle_added) { 2533 dlm_mle_detach_hb_events(dlm, mle); 2534 dlm_put_mle(mle); 2535 } else if (mle) { 2536 kmem_cache_free(dlm_mle_cache, mle); 2537 } 2538 goto leave; 2539 } 2540 2541 /* 2542 * at this point, we have a migration target, an mle 2543 * in the master list, and the MIGRATING flag set on 2544 * the lockres 2545 */ 2546 2547 /* now that remote nodes are spinning on the MIGRATING flag, 2548 * ensure that all assert_master work is flushed. */ 2549 flush_workqueue(dlm->dlm_worker); 2550 2551 /* get an extra reference on the mle. 2552 * otherwise the assert_master from the new 2553 * master will destroy this. 2554 * also, make sure that all callers of dlm_get_mle 2555 * take both dlm->spinlock and dlm->master_lock */ 2556 spin_lock(&dlm->spinlock); 2557 spin_lock(&dlm->master_lock); 2558 dlm_get_mle_inuse(mle); 2559 spin_unlock(&dlm->master_lock); 2560 spin_unlock(&dlm->spinlock); 2561 2562 /* notify new node and send all lock state */ 2563 /* call send_one_lockres with migration flag. 2564 * this serves as notice to the target node that a 2565 * migration is starting. */ 2566 ret = dlm_send_one_lockres(dlm, res, mres, target, 2567 DLM_MRES_MIGRATION); 2568 2569 if (ret < 0) { 2570 mlog(0, "migration to node %u failed with %d\n", 2571 target, ret); 2572 /* migration failed, detach and clean up mle */ 2573 dlm_mle_detach_hb_events(dlm, mle); 2574 dlm_put_mle(mle); 2575 dlm_put_mle_inuse(mle); 2576 spin_lock(&res->spinlock); 2577 res->state &= ~DLM_LOCK_RES_MIGRATING; 2578 wake = 1; 2579 spin_unlock(&res->spinlock); 2580 goto leave; 2581 } 2582 2583 /* at this point, the target sends a message to all nodes, 2584 * (using dlm_do_migrate_request). this node is skipped since 2585 * we had to put an mle in the list to begin the process. this 2586 * node now waits for target to do an assert master. this node 2587 * will be the last one notified, ensuring that the migration 2588 * is complete everywhere. if the target dies while this is 2589 * going on, some nodes could potentially see the target as the 2590 * master, so it is important that my recovery finds the migration 2591 * mle and sets the master to UNKNONWN. */ 2592 2593 2594 /* wait for new node to assert master */ 2595 while (1) { 2596 ret = wait_event_interruptible_timeout(mle->wq, 2597 (atomic_read(&mle->woken) == 1), 2598 msecs_to_jiffies(5000)); 2599 2600 if (ret >= 0) { 2601 if (atomic_read(&mle->woken) == 1 || 2602 res->owner == target) 2603 break; 2604 2605 mlog(0, "%s:%.*s: timed out during migration\n", 2606 dlm->name, res->lockname.len, res->lockname.name); 2607 /* avoid hang during shutdown when migrating lockres 2608 * to a node which also goes down */ 2609 if (dlm_is_node_dead(dlm, target)) { 2610 mlog(0, "%s:%.*s: expected migration " 2611 "target %u is no longer up, restarting\n", 2612 dlm->name, res->lockname.len, 2613 res->lockname.name, target); 2614 ret = -EINVAL; 2615 /* migration failed, detach and clean up mle */ 2616 dlm_mle_detach_hb_events(dlm, mle); 2617 dlm_put_mle(mle); 2618 dlm_put_mle_inuse(mle); 2619 spin_lock(&res->spinlock); 2620 res->state &= ~DLM_LOCK_RES_MIGRATING; 2621 wake = 1; 2622 spin_unlock(&res->spinlock); 2623 goto leave; 2624 } 2625 } else 2626 mlog(0, "%s:%.*s: caught signal during migration\n", 2627 dlm->name, res->lockname.len, res->lockname.name); 2628 } 2629 2630 /* all done, set the owner, clear the flag */ 2631 spin_lock(&res->spinlock); 2632 dlm_set_lockres_owner(dlm, res, target); 2633 res->state &= ~DLM_LOCK_RES_MIGRATING; 2634 dlm_remove_nonlocal_locks(dlm, res); 2635 spin_unlock(&res->spinlock); 2636 wake_up(&res->wq); 2637 2638 /* master is known, detach if not already detached */ 2639 dlm_mle_detach_hb_events(dlm, mle); 2640 dlm_put_mle_inuse(mle); 2641 ret = 0; 2642 2643 dlm_lockres_calc_usage(dlm, res); 2644 2645 leave: 2646 /* re-dirty the lockres if we failed */ 2647 if (ret < 0) 2648 dlm_kick_thread(dlm, res); 2649 2650 /* wake up waiters if the MIGRATING flag got set 2651 * but migration failed */ 2652 if (wake) 2653 wake_up(&res->wq); 2654 2655 /* TODO: cleanup */ 2656 if (mres) 2657 free_page((unsigned long)mres); 2658 2659 dlm_put(dlm); 2660 2661 mlog(0, "returning %d\n", ret); 2662 return ret; 2663 } 2664 2665 #define DLM_MIGRATION_RETRY_MS 100 2666 2667 /* Should be called only after beginning the domain leave process. 2668 * There should not be any remaining locks on nonlocal lock resources, 2669 * and there should be no local locks left on locally mastered resources. 2670 * 2671 * Called with the dlm spinlock held, may drop it to do migration, but 2672 * will re-acquire before exit. 2673 * 2674 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */ 2675 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2676 { 2677 int ret; 2678 int lock_dropped = 0; 2679 int numlocks; 2680 2681 spin_lock(&res->spinlock); 2682 if (res->owner != dlm->node_num) { 2683 if (!__dlm_lockres_unused(res)) { 2684 mlog(ML_ERROR, "%s:%.*s: this node is not master, " 2685 "trying to free this but locks remain\n", 2686 dlm->name, res->lockname.len, res->lockname.name); 2687 } 2688 spin_unlock(&res->spinlock); 2689 goto leave; 2690 } 2691 2692 /* No need to migrate a lockres having no locks */ 2693 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2694 if (ret >= 0 && numlocks == 0) { 2695 spin_unlock(&res->spinlock); 2696 goto leave; 2697 } 2698 spin_unlock(&res->spinlock); 2699 2700 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ 2701 spin_unlock(&dlm->spinlock); 2702 lock_dropped = 1; 2703 while (1) { 2704 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES); 2705 if (ret >= 0) 2706 break; 2707 if (ret == -ENOTEMPTY) { 2708 mlog(ML_ERROR, "lockres %.*s still has local locks!\n", 2709 res->lockname.len, res->lockname.name); 2710 BUG(); 2711 } 2712 2713 mlog(0, "lockres %.*s: migrate failed, " 2714 "retrying\n", res->lockname.len, 2715 res->lockname.name); 2716 msleep(DLM_MIGRATION_RETRY_MS); 2717 } 2718 spin_lock(&dlm->spinlock); 2719 leave: 2720 return lock_dropped; 2721 } 2722 2723 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2724 { 2725 int ret; 2726 spin_lock(&dlm->ast_lock); 2727 spin_lock(&lock->spinlock); 2728 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); 2729 spin_unlock(&lock->spinlock); 2730 spin_unlock(&dlm->ast_lock); 2731 return ret; 2732 } 2733 2734 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, 2735 struct dlm_lock_resource *res, 2736 u8 mig_target) 2737 { 2738 int can_proceed; 2739 spin_lock(&res->spinlock); 2740 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2741 spin_unlock(&res->spinlock); 2742 2743 /* target has died, so make the caller break out of the 2744 * wait_event, but caller must recheck the domain_map */ 2745 spin_lock(&dlm->spinlock); 2746 if (!test_bit(mig_target, dlm->domain_map)) 2747 can_proceed = 1; 2748 spin_unlock(&dlm->spinlock); 2749 return can_proceed; 2750 } 2751 2752 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, 2753 struct dlm_lock_resource *res) 2754 { 2755 int ret; 2756 spin_lock(&res->spinlock); 2757 ret = !!(res->state & DLM_LOCK_RES_DIRTY); 2758 spin_unlock(&res->spinlock); 2759 return ret; 2760 } 2761 2762 2763 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 2764 struct dlm_lock_resource *res, 2765 u8 target) 2766 { 2767 int ret = 0; 2768 2769 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", 2770 res->lockname.len, res->lockname.name, dlm->node_num, 2771 target); 2772 /* need to set MIGRATING flag on lockres. this is done by 2773 * ensuring that all asts have been flushed for this lockres. */ 2774 spin_lock(&res->spinlock); 2775 BUG_ON(res->migration_pending); 2776 res->migration_pending = 1; 2777 /* strategy is to reserve an extra ast then release 2778 * it below, letting the release do all of the work */ 2779 __dlm_lockres_reserve_ast(res); 2780 spin_unlock(&res->spinlock); 2781 2782 /* now flush all the pending asts */ 2783 dlm_kick_thread(dlm, res); 2784 /* before waiting on DIRTY, block processes which may 2785 * try to dirty the lockres before MIGRATING is set */ 2786 spin_lock(&res->spinlock); 2787 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); 2788 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; 2789 spin_unlock(&res->spinlock); 2790 /* now wait on any pending asts and the DIRTY state */ 2791 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); 2792 dlm_lockres_release_ast(dlm, res); 2793 2794 mlog(0, "about to wait on migration_wq, dirty=%s\n", 2795 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); 2796 /* if the extra ref we just put was the final one, this 2797 * will pass thru immediately. otherwise, we need to wait 2798 * for the last ast to finish. */ 2799 again: 2800 ret = wait_event_interruptible_timeout(dlm->migration_wq, 2801 dlm_migration_can_proceed(dlm, res, target), 2802 msecs_to_jiffies(1000)); 2803 if (ret < 0) { 2804 mlog(0, "woken again: migrating? %s, dead? %s\n", 2805 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2806 test_bit(target, dlm->domain_map) ? "no":"yes"); 2807 } else { 2808 mlog(0, "all is well: migrating? %s, dead? %s\n", 2809 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2810 test_bit(target, dlm->domain_map) ? "no":"yes"); 2811 } 2812 if (!dlm_migration_can_proceed(dlm, res, target)) { 2813 mlog(0, "trying again...\n"); 2814 goto again; 2815 } 2816 /* now that we are sure the MIGRATING state is there, drop 2817 * the unneded state which blocked threads trying to DIRTY */ 2818 spin_lock(&res->spinlock); 2819 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); 2820 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2821 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2822 spin_unlock(&res->spinlock); 2823 2824 /* did the target go down or die? */ 2825 spin_lock(&dlm->spinlock); 2826 if (!test_bit(target, dlm->domain_map)) { 2827 mlog(ML_ERROR, "aha. migration target %u just went down\n", 2828 target); 2829 ret = -EHOSTDOWN; 2830 } 2831 spin_unlock(&dlm->spinlock); 2832 2833 /* 2834 * at this point: 2835 * 2836 * o the DLM_LOCK_RES_MIGRATING flag is set 2837 * o there are no pending asts on this lockres 2838 * o all processes trying to reserve an ast on this 2839 * lockres must wait for the MIGRATING flag to clear 2840 */ 2841 return ret; 2842 } 2843 2844 /* last step in the migration process. 2845 * original master calls this to free all of the dlm_lock 2846 * structures that used to be for other nodes. */ 2847 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 2848 struct dlm_lock_resource *res) 2849 { 2850 struct list_head *queue = &res->granted; 2851 int i, bit; 2852 struct dlm_lock *lock, *next; 2853 2854 assert_spin_locked(&res->spinlock); 2855 2856 BUG_ON(res->owner == dlm->node_num); 2857 2858 for (i=0; i<3; i++) { 2859 list_for_each_entry_safe(lock, next, queue, list) { 2860 if (lock->ml.node != dlm->node_num) { 2861 mlog(0, "putting lock for node %u\n", 2862 lock->ml.node); 2863 /* be extra careful */ 2864 BUG_ON(!list_empty(&lock->ast_list)); 2865 BUG_ON(!list_empty(&lock->bast_list)); 2866 BUG_ON(lock->ast_pending); 2867 BUG_ON(lock->bast_pending); 2868 dlm_lockres_clear_refmap_bit(lock->ml.node, res); 2869 list_del_init(&lock->list); 2870 dlm_lock_put(lock); 2871 /* In a normal unlock, we would have added a 2872 * DLM_UNLOCK_FREE_LOCK action. Force it. */ 2873 dlm_lock_put(lock); 2874 } 2875 } 2876 queue++; 2877 } 2878 bit = 0; 2879 while (1) { 2880 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); 2881 if (bit >= O2NM_MAX_NODES) 2882 break; 2883 /* do not clear the local node reference, if there is a 2884 * process holding this, let it drop the ref itself */ 2885 if (bit != dlm->node_num) { 2886 mlog(0, "%s:%.*s: node %u had a ref to this " 2887 "migrating lockres, clearing\n", dlm->name, 2888 res->lockname.len, res->lockname.name, bit); 2889 dlm_lockres_clear_refmap_bit(bit, res); 2890 } 2891 bit++; 2892 } 2893 } 2894 2895 /* for now this is not too intelligent. we will 2896 * need stats to make this do the right thing. 2897 * this just finds the first lock on one of the 2898 * queues and uses that node as the target. */ 2899 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 2900 struct dlm_lock_resource *res) 2901 { 2902 int i; 2903 struct list_head *queue = &res->granted; 2904 struct dlm_lock *lock; 2905 int nodenum; 2906 2907 assert_spin_locked(&dlm->spinlock); 2908 2909 spin_lock(&res->spinlock); 2910 for (i=0; i<3; i++) { 2911 list_for_each_entry(lock, queue, list) { 2912 /* up to the caller to make sure this node 2913 * is alive */ 2914 if (lock->ml.node != dlm->node_num) { 2915 spin_unlock(&res->spinlock); 2916 return lock->ml.node; 2917 } 2918 } 2919 queue++; 2920 } 2921 spin_unlock(&res->spinlock); 2922 mlog(0, "have not found a suitable target yet! checking domain map\n"); 2923 2924 /* ok now we're getting desperate. pick anyone alive. */ 2925 nodenum = -1; 2926 while (1) { 2927 nodenum = find_next_bit(dlm->domain_map, 2928 O2NM_MAX_NODES, nodenum+1); 2929 mlog(0, "found %d in domain map\n", nodenum); 2930 if (nodenum >= O2NM_MAX_NODES) 2931 break; 2932 if (nodenum != dlm->node_num) { 2933 mlog(0, "picking %d\n", nodenum); 2934 return nodenum; 2935 } 2936 } 2937 2938 mlog(0, "giving up. no master to migrate to\n"); 2939 return DLM_LOCK_RES_OWNER_UNKNOWN; 2940 } 2941 2942 2943 2944 /* this is called by the new master once all lockres 2945 * data has been received */ 2946 static int dlm_do_migrate_request(struct dlm_ctxt *dlm, 2947 struct dlm_lock_resource *res, 2948 u8 master, u8 new_master, 2949 struct dlm_node_iter *iter) 2950 { 2951 struct dlm_migrate_request migrate; 2952 int ret, status = 0; 2953 int nodenum; 2954 2955 memset(&migrate, 0, sizeof(migrate)); 2956 migrate.namelen = res->lockname.len; 2957 memcpy(migrate.name, res->lockname.name, migrate.namelen); 2958 migrate.new_master = new_master; 2959 migrate.master = master; 2960 2961 ret = 0; 2962 2963 /* send message to all nodes, except the master and myself */ 2964 while ((nodenum = dlm_node_iter_next(iter)) >= 0) { 2965 if (nodenum == master || 2966 nodenum == new_master) 2967 continue; 2968 2969 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, 2970 &migrate, sizeof(migrate), nodenum, 2971 &status); 2972 if (ret < 0) 2973 mlog_errno(ret); 2974 else if (status < 0) { 2975 mlog(0, "migrate request (node %u) returned %d!\n", 2976 nodenum, status); 2977 ret = status; 2978 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { 2979 /* during the migration request we short-circuited 2980 * the mastery of the lockres. make sure we have 2981 * a mastery ref for nodenum */ 2982 mlog(0, "%s:%.*s: need ref for node %u\n", 2983 dlm->name, res->lockname.len, res->lockname.name, 2984 nodenum); 2985 spin_lock(&res->spinlock); 2986 dlm_lockres_set_refmap_bit(nodenum, res); 2987 spin_unlock(&res->spinlock); 2988 } 2989 } 2990 2991 if (ret < 0) 2992 mlog_errno(ret); 2993 2994 mlog(0, "returning ret=%d\n", ret); 2995 return ret; 2996 } 2997 2998 2999 /* if there is an existing mle for this lockres, we now know who the master is. 3000 * (the one who sent us *this* message) we can clear it up right away. 3001 * since the process that put the mle on the list still has a reference to it, 3002 * we can unhash it now, set the master and wake the process. as a result, 3003 * we will have no mle in the list to start with. now we can add an mle for 3004 * the migration and this should be the only one found for those scanning the 3005 * list. */ 3006 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, 3007 void **ret_data) 3008 { 3009 struct dlm_ctxt *dlm = data; 3010 struct dlm_lock_resource *res = NULL; 3011 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; 3012 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; 3013 const char *name; 3014 unsigned int namelen, hash; 3015 int ret = 0; 3016 3017 if (!dlm_grab(dlm)) 3018 return -EINVAL; 3019 3020 name = migrate->name; 3021 namelen = migrate->namelen; 3022 hash = dlm_lockid_hash(name, namelen); 3023 3024 /* preallocate.. if this fails, abort */ 3025 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, 3026 GFP_NOFS); 3027 3028 if (!mle) { 3029 ret = -ENOMEM; 3030 goto leave; 3031 } 3032 3033 /* check for pre-existing lock */ 3034 spin_lock(&dlm->spinlock); 3035 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 3036 spin_lock(&dlm->master_lock); 3037 3038 if (res) { 3039 spin_lock(&res->spinlock); 3040 if (res->state & DLM_LOCK_RES_RECOVERING) { 3041 /* if all is working ok, this can only mean that we got 3042 * a migrate request from a node that we now see as 3043 * dead. what can we do here? drop it to the floor? */ 3044 spin_unlock(&res->spinlock); 3045 mlog(ML_ERROR, "Got a migrate request, but the " 3046 "lockres is marked as recovering!"); 3047 kmem_cache_free(dlm_mle_cache, mle); 3048 ret = -EINVAL; /* need a better solution */ 3049 goto unlock; 3050 } 3051 res->state |= DLM_LOCK_RES_MIGRATING; 3052 spin_unlock(&res->spinlock); 3053 } 3054 3055 /* ignore status. only nonzero status would BUG. */ 3056 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 3057 name, namelen, 3058 migrate->new_master, 3059 migrate->master); 3060 3061 unlock: 3062 spin_unlock(&dlm->master_lock); 3063 spin_unlock(&dlm->spinlock); 3064 3065 if (oldmle) { 3066 /* master is known, detach if not already detached */ 3067 dlm_mle_detach_hb_events(dlm, oldmle); 3068 dlm_put_mle(oldmle); 3069 } 3070 3071 if (res) 3072 dlm_lockres_put(res); 3073 leave: 3074 dlm_put(dlm); 3075 return ret; 3076 } 3077 3078 /* must be holding dlm->spinlock and dlm->master_lock 3079 * when adding a migration mle, we can clear any other mles 3080 * in the master list because we know with certainty that 3081 * the master is "master". so we remove any old mle from 3082 * the list after setting it's master field, and then add 3083 * the new migration mle. this way we can hold with the rule 3084 * of having only one mle for a given lock name at all times. */ 3085 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 3086 struct dlm_lock_resource *res, 3087 struct dlm_master_list_entry *mle, 3088 struct dlm_master_list_entry **oldmle, 3089 const char *name, unsigned int namelen, 3090 u8 new_master, u8 master) 3091 { 3092 int found; 3093 int ret = 0; 3094 3095 *oldmle = NULL; 3096 3097 mlog_entry_void(); 3098 3099 assert_spin_locked(&dlm->spinlock); 3100 assert_spin_locked(&dlm->master_lock); 3101 3102 /* caller is responsible for any ref taken here on oldmle */ 3103 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); 3104 if (found) { 3105 struct dlm_master_list_entry *tmp = *oldmle; 3106 spin_lock(&tmp->spinlock); 3107 if (tmp->type == DLM_MLE_MIGRATION) { 3108 if (master == dlm->node_num) { 3109 /* ah another process raced me to it */ 3110 mlog(0, "tried to migrate %.*s, but some " 3111 "process beat me to it\n", 3112 namelen, name); 3113 ret = -EEXIST; 3114 } else { 3115 /* bad. 2 NODES are trying to migrate! */ 3116 mlog(ML_ERROR, "migration error mle: " 3117 "master=%u new_master=%u // request: " 3118 "master=%u new_master=%u // " 3119 "lockres=%.*s\n", 3120 tmp->master, tmp->new_master, 3121 master, new_master, 3122 namelen, name); 3123 BUG(); 3124 } 3125 } else { 3126 /* this is essentially what assert_master does */ 3127 tmp->master = master; 3128 atomic_set(&tmp->woken, 1); 3129 wake_up(&tmp->wq); 3130 /* remove it from the list so that only one 3131 * mle will be found */ 3132 list_del_init(&tmp->list); 3133 /* this was obviously WRONG. mle is uninited here. should be tmp. */ 3134 __dlm_mle_detach_hb_events(dlm, tmp); 3135 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3136 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3137 "telling master to get ref for cleared out mle " 3138 "during migration\n", dlm->name, namelen, name, 3139 master, new_master); 3140 } 3141 spin_unlock(&tmp->spinlock); 3142 } 3143 3144 /* now add a migration mle to the tail of the list */ 3145 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); 3146 mle->new_master = new_master; 3147 /* the new master will be sending an assert master for this. 3148 * at that point we will get the refmap reference */ 3149 mle->master = master; 3150 /* do this for consistency with other mle types */ 3151 set_bit(new_master, mle->maybe_map); 3152 list_add(&mle->list, &dlm->master_list); 3153 3154 return ret; 3155 } 3156 3157 3158 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) 3159 { 3160 struct dlm_master_list_entry *mle, *next; 3161 struct dlm_lock_resource *res; 3162 unsigned int hash; 3163 3164 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); 3165 top: 3166 assert_spin_locked(&dlm->spinlock); 3167 3168 /* clean the master list */ 3169 spin_lock(&dlm->master_lock); 3170 list_for_each_entry_safe(mle, next, &dlm->master_list, list) { 3171 BUG_ON(mle->type != DLM_MLE_BLOCK && 3172 mle->type != DLM_MLE_MASTER && 3173 mle->type != DLM_MLE_MIGRATION); 3174 3175 /* MASTER mles are initiated locally. the waiting 3176 * process will notice the node map change 3177 * shortly. let that happen as normal. */ 3178 if (mle->type == DLM_MLE_MASTER) 3179 continue; 3180 3181 3182 /* BLOCK mles are initiated by other nodes. 3183 * need to clean up if the dead node would have 3184 * been the master. */ 3185 if (mle->type == DLM_MLE_BLOCK) { 3186 int bit; 3187 3188 spin_lock(&mle->spinlock); 3189 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 3190 if (bit != dead_node) { 3191 mlog(0, "mle found, but dead node %u would " 3192 "not have been master\n", dead_node); 3193 spin_unlock(&mle->spinlock); 3194 } else { 3195 /* must drop the refcount by one since the 3196 * assert_master will never arrive. this 3197 * may result in the mle being unlinked and 3198 * freed, but there may still be a process 3199 * waiting in the dlmlock path which is fine. */ 3200 mlog(0, "node %u was expected master\n", 3201 dead_node); 3202 atomic_set(&mle->woken, 1); 3203 spin_unlock(&mle->spinlock); 3204 wake_up(&mle->wq); 3205 /* do not need events any longer, so detach 3206 * from heartbeat */ 3207 __dlm_mle_detach_hb_events(dlm, mle); 3208 __dlm_put_mle(mle); 3209 } 3210 continue; 3211 } 3212 3213 /* everything else is a MIGRATION mle */ 3214 3215 /* the rule for MIGRATION mles is that the master 3216 * becomes UNKNOWN if *either* the original or 3217 * the new master dies. all UNKNOWN lockreses 3218 * are sent to whichever node becomes the recovery 3219 * master. the new master is responsible for 3220 * determining if there is still a master for 3221 * this lockres, or if he needs to take over 3222 * mastery. either way, this node should expect 3223 * another message to resolve this. */ 3224 if (mle->master != dead_node && 3225 mle->new_master != dead_node) 3226 continue; 3227 3228 /* if we have reached this point, this mle needs to 3229 * be removed from the list and freed. */ 3230 3231 /* remove from the list early. NOTE: unlinking 3232 * list_head while in list_for_each_safe */ 3233 __dlm_mle_detach_hb_events(dlm, mle); 3234 spin_lock(&mle->spinlock); 3235 list_del_init(&mle->list); 3236 atomic_set(&mle->woken, 1); 3237 spin_unlock(&mle->spinlock); 3238 wake_up(&mle->wq); 3239 3240 mlog(0, "%s: node %u died during migration from " 3241 "%u to %u!\n", dlm->name, dead_node, 3242 mle->master, mle->new_master); 3243 /* if there is a lockres associated with this 3244 * mle, find it and set its owner to UNKNOWN */ 3245 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len); 3246 res = __dlm_lookup_lockres(dlm, mle->u.name.name, 3247 mle->u.name.len, hash); 3248 if (res) { 3249 /* unfortunately if we hit this rare case, our 3250 * lock ordering is messed. we need to drop 3251 * the master lock so that we can take the 3252 * lockres lock, meaning that we will have to 3253 * restart from the head of list. */ 3254 spin_unlock(&dlm->master_lock); 3255 3256 /* move lockres onto recovery list */ 3257 spin_lock(&res->spinlock); 3258 dlm_set_lockres_owner(dlm, res, 3259 DLM_LOCK_RES_OWNER_UNKNOWN); 3260 dlm_move_lockres_to_recovery_list(dlm, res); 3261 spin_unlock(&res->spinlock); 3262 dlm_lockres_put(res); 3263 3264 /* about to get rid of mle, detach from heartbeat */ 3265 __dlm_mle_detach_hb_events(dlm, mle); 3266 3267 /* dump the mle */ 3268 spin_lock(&dlm->master_lock); 3269 __dlm_put_mle(mle); 3270 spin_unlock(&dlm->master_lock); 3271 3272 /* restart */ 3273 goto top; 3274 } 3275 3276 /* this may be the last reference */ 3277 __dlm_put_mle(mle); 3278 } 3279 spin_unlock(&dlm->master_lock); 3280 } 3281 3282 3283 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 3284 u8 old_master) 3285 { 3286 struct dlm_node_iter iter; 3287 int ret = 0; 3288 3289 spin_lock(&dlm->spinlock); 3290 dlm_node_iter_init(dlm->domain_map, &iter); 3291 clear_bit(old_master, iter.node_map); 3292 clear_bit(dlm->node_num, iter.node_map); 3293 spin_unlock(&dlm->spinlock); 3294 3295 /* ownership of the lockres is changing. account for the 3296 * mastery reference here since old_master will briefly have 3297 * a reference after the migration completes */ 3298 spin_lock(&res->spinlock); 3299 dlm_lockres_set_refmap_bit(old_master, res); 3300 spin_unlock(&res->spinlock); 3301 3302 mlog(0, "now time to do a migrate request to other nodes\n"); 3303 ret = dlm_do_migrate_request(dlm, res, old_master, 3304 dlm->node_num, &iter); 3305 if (ret < 0) { 3306 mlog_errno(ret); 3307 goto leave; 3308 } 3309 3310 mlog(0, "doing assert master of %.*s to all except the original node\n", 3311 res->lockname.len, res->lockname.name); 3312 /* this call now finishes out the nodemap 3313 * even if one or more nodes die */ 3314 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3315 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3316 if (ret < 0) { 3317 /* no longer need to retry. all living nodes contacted. */ 3318 mlog_errno(ret); 3319 ret = 0; 3320 } 3321 3322 memset(iter.node_map, 0, sizeof(iter.node_map)); 3323 set_bit(old_master, iter.node_map); 3324 mlog(0, "doing assert master of %.*s back to %u\n", 3325 res->lockname.len, res->lockname.name, old_master); 3326 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3327 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3328 if (ret < 0) { 3329 mlog(0, "assert master to original master failed " 3330 "with %d.\n", ret); 3331 /* the only nonzero status here would be because of 3332 * a dead original node. we're done. */ 3333 ret = 0; 3334 } 3335 3336 /* all done, set the owner, clear the flag */ 3337 spin_lock(&res->spinlock); 3338 dlm_set_lockres_owner(dlm, res, dlm->node_num); 3339 res->state &= ~DLM_LOCK_RES_MIGRATING; 3340 spin_unlock(&res->spinlock); 3341 /* re-dirty it on the new master */ 3342 dlm_kick_thread(dlm, res); 3343 wake_up(&res->wq); 3344 leave: 3345 return ret; 3346 } 3347 3348 /* 3349 * LOCKRES AST REFCOUNT 3350 * this is integral to migration 3351 */ 3352 3353 /* for future intent to call an ast, reserve one ahead of time. 3354 * this should be called only after waiting on the lockres 3355 * with dlm_wait_on_lockres, and while still holding the 3356 * spinlock after the call. */ 3357 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) 3358 { 3359 assert_spin_locked(&res->spinlock); 3360 if (res->state & DLM_LOCK_RES_MIGRATING) { 3361 __dlm_print_one_lock_resource(res); 3362 } 3363 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3364 3365 atomic_inc(&res->asts_reserved); 3366 } 3367 3368 /* 3369 * used to drop the reserved ast, either because it went unused, 3370 * or because the ast/bast was actually called. 3371 * 3372 * also, if there is a pending migration on this lockres, 3373 * and this was the last pending ast on the lockres, 3374 * atomically set the MIGRATING flag before we drop the lock. 3375 * this is how we ensure that migration can proceed with no 3376 * asts in progress. note that it is ok if the state of the 3377 * queues is such that a lock should be granted in the future 3378 * or that a bast should be fired, because the new master will 3379 * shuffle the lists on this lockres as soon as it is migrated. 3380 */ 3381 void dlm_lockres_release_ast(struct dlm_ctxt *dlm, 3382 struct dlm_lock_resource *res) 3383 { 3384 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) 3385 return; 3386 3387 if (!res->migration_pending) { 3388 spin_unlock(&res->spinlock); 3389 return; 3390 } 3391 3392 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3393 res->migration_pending = 0; 3394 res->state |= DLM_LOCK_RES_MIGRATING; 3395 spin_unlock(&res->spinlock); 3396 wake_up(&res->wq); 3397 wake_up(&dlm->migration_wq); 3398 } 3399