1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmmod.c 5 * 6 * standalone DLM module 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/utsname.h> 34 #include <linux/init.h> 35 #include <linux/sysctl.h> 36 #include <linux/random.h> 37 #include <linux/blkdev.h> 38 #include <linux/socket.h> 39 #include <linux/inet.h> 40 #include <linux/spinlock.h> 41 #include <linux/delay.h> 42 43 44 #include "cluster/heartbeat.h" 45 #include "cluster/nodemanager.h" 46 #include "cluster/tcp.h" 47 48 #include "dlmapi.h" 49 #include "dlmcommon.h" 50 #include "dlmdomain.h" 51 52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) 53 #include "cluster/masklog.h" 54 55 enum dlm_mle_type { 56 DLM_MLE_BLOCK, 57 DLM_MLE_MASTER, 58 DLM_MLE_MIGRATION 59 }; 60 61 struct dlm_lock_name 62 { 63 u8 len; 64 u8 name[DLM_LOCKID_NAME_MAX]; 65 }; 66 67 struct dlm_master_list_entry 68 { 69 struct list_head list; 70 struct list_head hb_events; 71 struct dlm_ctxt *dlm; 72 spinlock_t spinlock; 73 wait_queue_head_t wq; 74 atomic_t woken; 75 struct kref mle_refs; 76 int inuse; 77 unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 78 unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 79 unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 80 unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 81 u8 master; 82 u8 new_master; 83 enum dlm_mle_type type; 84 struct o2hb_callback_func mle_hb_up; 85 struct o2hb_callback_func mle_hb_down; 86 union { 87 struct dlm_lock_resource *res; 88 struct dlm_lock_name name; 89 } u; 90 }; 91 92 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 93 struct dlm_master_list_entry *mle, 94 struct o2nm_node *node, 95 int idx); 96 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 97 struct dlm_master_list_entry *mle, 98 struct o2nm_node *node, 99 int idx); 100 101 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); 102 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 103 struct dlm_lock_resource *res, 104 void *nodemap, u32 flags); 105 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); 106 107 static inline int dlm_mle_equal(struct dlm_ctxt *dlm, 108 struct dlm_master_list_entry *mle, 109 const char *name, 110 unsigned int namelen) 111 { 112 struct dlm_lock_resource *res; 113 114 if (dlm != mle->dlm) 115 return 0; 116 117 if (mle->type == DLM_MLE_BLOCK || 118 mle->type == DLM_MLE_MIGRATION) { 119 if (namelen != mle->u.name.len || 120 memcmp(name, mle->u.name.name, namelen)!=0) 121 return 0; 122 } else { 123 res = mle->u.res; 124 if (namelen != res->lockname.len || 125 memcmp(res->lockname.name, name, namelen) != 0) 126 return 0; 127 } 128 return 1; 129 } 130 131 #define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m) 132 static void _dlm_print_nodemap(unsigned long *map, const char *mapname) 133 { 134 int i; 135 printk("%s=[ ", mapname); 136 for (i=0; i<O2NM_MAX_NODES; i++) 137 if (test_bit(i, map)) 138 printk("%d ", i); 139 printk("]"); 140 } 141 142 static void dlm_print_one_mle(struct dlm_master_list_entry *mle) 143 { 144 int refs; 145 char *type; 146 char attached; 147 u8 master; 148 unsigned int namelen; 149 const char *name; 150 struct kref *k; 151 unsigned long *maybe = mle->maybe_map, 152 *vote = mle->vote_map, 153 *resp = mle->response_map, 154 *node = mle->node_map; 155 156 k = &mle->mle_refs; 157 if (mle->type == DLM_MLE_BLOCK) 158 type = "BLK"; 159 else if (mle->type == DLM_MLE_MASTER) 160 type = "MAS"; 161 else 162 type = "MIG"; 163 refs = atomic_read(&k->refcount); 164 master = mle->master; 165 attached = (list_empty(&mle->hb_events) ? 'N' : 'Y'); 166 167 if (mle->type != DLM_MLE_MASTER) { 168 namelen = mle->u.name.len; 169 name = mle->u.name.name; 170 } else { 171 namelen = mle->u.res->lockname.len; 172 name = mle->u.res->lockname.name; 173 } 174 175 mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ", 176 namelen, name, type, refs, master, mle->new_master, attached, 177 mle->inuse); 178 dlm_print_nodemap(maybe); 179 printk(", "); 180 dlm_print_nodemap(vote); 181 printk(", "); 182 dlm_print_nodemap(resp); 183 printk(", "); 184 dlm_print_nodemap(node); 185 printk(", "); 186 printk("\n"); 187 } 188 189 #if 0 190 /* Code here is included but defined out as it aids debugging */ 191 192 static void dlm_dump_mles(struct dlm_ctxt *dlm) 193 { 194 struct dlm_master_list_entry *mle; 195 196 mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name); 197 spin_lock(&dlm->master_lock); 198 list_for_each_entry(mle, &dlm->master_list, list) 199 dlm_print_one_mle(mle); 200 spin_unlock(&dlm->master_lock); 201 } 202 203 int dlm_dump_all_mles(const char __user *data, unsigned int len) 204 { 205 struct dlm_ctxt *dlm; 206 207 spin_lock(&dlm_domain_lock); 208 list_for_each_entry(dlm, &dlm_domains, list) { 209 mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name); 210 dlm_dump_mles(dlm); 211 } 212 spin_unlock(&dlm_domain_lock); 213 return len; 214 } 215 EXPORT_SYMBOL_GPL(dlm_dump_all_mles); 216 217 #endif /* 0 */ 218 219 220 static struct kmem_cache *dlm_mle_cache = NULL; 221 222 223 static void dlm_mle_release(struct kref *kref); 224 static void dlm_init_mle(struct dlm_master_list_entry *mle, 225 enum dlm_mle_type type, 226 struct dlm_ctxt *dlm, 227 struct dlm_lock_resource *res, 228 const char *name, 229 unsigned int namelen); 230 static void dlm_put_mle(struct dlm_master_list_entry *mle); 231 static void __dlm_put_mle(struct dlm_master_list_entry *mle); 232 static int dlm_find_mle(struct dlm_ctxt *dlm, 233 struct dlm_master_list_entry **mle, 234 char *name, unsigned int namelen); 235 236 static int dlm_do_master_request(struct dlm_lock_resource *res, 237 struct dlm_master_list_entry *mle, int to); 238 239 240 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 241 struct dlm_lock_resource *res, 242 struct dlm_master_list_entry *mle, 243 int *blocked); 244 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 245 struct dlm_lock_resource *res, 246 struct dlm_master_list_entry *mle, 247 int blocked); 248 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 249 struct dlm_lock_resource *res, 250 struct dlm_master_list_entry *mle, 251 struct dlm_master_list_entry **oldmle, 252 const char *name, unsigned int namelen, 253 u8 new_master, u8 master); 254 255 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 256 struct dlm_lock_resource *res); 257 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 258 struct dlm_lock_resource *res); 259 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 260 struct dlm_lock_resource *res, 261 u8 target); 262 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 263 struct dlm_lock_resource *res); 264 265 266 int dlm_is_host_down(int errno) 267 { 268 switch (errno) { 269 case -EBADF: 270 case -ECONNREFUSED: 271 case -ENOTCONN: 272 case -ECONNRESET: 273 case -EPIPE: 274 case -EHOSTDOWN: 275 case -EHOSTUNREACH: 276 case -ETIMEDOUT: 277 case -ECONNABORTED: 278 case -ENETDOWN: 279 case -ENETUNREACH: 280 case -ENETRESET: 281 case -ESHUTDOWN: 282 case -ENOPROTOOPT: 283 case -EINVAL: /* if returned from our tcp code, 284 this means there is no socket */ 285 return 1; 286 } 287 return 0; 288 } 289 290 291 /* 292 * MASTER LIST FUNCTIONS 293 */ 294 295 296 /* 297 * regarding master list entries and heartbeat callbacks: 298 * 299 * in order to avoid sleeping and allocation that occurs in 300 * heartbeat, master list entries are simply attached to the 301 * dlm's established heartbeat callbacks. the mle is attached 302 * when it is created, and since the dlm->spinlock is held at 303 * that time, any heartbeat event will be properly discovered 304 * by the mle. the mle needs to be detached from the 305 * dlm->mle_hb_events list as soon as heartbeat events are no 306 * longer useful to the mle, and before the mle is freed. 307 * 308 * as a general rule, heartbeat events are no longer needed by 309 * the mle once an "answer" regarding the lock master has been 310 * received. 311 */ 312 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, 313 struct dlm_master_list_entry *mle) 314 { 315 assert_spin_locked(&dlm->spinlock); 316 317 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); 318 } 319 320 321 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 322 struct dlm_master_list_entry *mle) 323 { 324 if (!list_empty(&mle->hb_events)) 325 list_del_init(&mle->hb_events); 326 } 327 328 329 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 330 struct dlm_master_list_entry *mle) 331 { 332 spin_lock(&dlm->spinlock); 333 __dlm_mle_detach_hb_events(dlm, mle); 334 spin_unlock(&dlm->spinlock); 335 } 336 337 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) 338 { 339 struct dlm_ctxt *dlm; 340 dlm = mle->dlm; 341 342 assert_spin_locked(&dlm->spinlock); 343 assert_spin_locked(&dlm->master_lock); 344 mle->inuse++; 345 kref_get(&mle->mle_refs); 346 } 347 348 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) 349 { 350 struct dlm_ctxt *dlm; 351 dlm = mle->dlm; 352 353 spin_lock(&dlm->spinlock); 354 spin_lock(&dlm->master_lock); 355 mle->inuse--; 356 __dlm_put_mle(mle); 357 spin_unlock(&dlm->master_lock); 358 spin_unlock(&dlm->spinlock); 359 360 } 361 362 /* remove from list and free */ 363 static void __dlm_put_mle(struct dlm_master_list_entry *mle) 364 { 365 struct dlm_ctxt *dlm; 366 dlm = mle->dlm; 367 368 assert_spin_locked(&dlm->spinlock); 369 assert_spin_locked(&dlm->master_lock); 370 if (!atomic_read(&mle->mle_refs.refcount)) { 371 /* this may or may not crash, but who cares. 372 * it's a BUG. */ 373 mlog(ML_ERROR, "bad mle: %p\n", mle); 374 dlm_print_one_mle(mle); 375 BUG(); 376 } else 377 kref_put(&mle->mle_refs, dlm_mle_release); 378 } 379 380 381 /* must not have any spinlocks coming in */ 382 static void dlm_put_mle(struct dlm_master_list_entry *mle) 383 { 384 struct dlm_ctxt *dlm; 385 dlm = mle->dlm; 386 387 spin_lock(&dlm->spinlock); 388 spin_lock(&dlm->master_lock); 389 __dlm_put_mle(mle); 390 spin_unlock(&dlm->master_lock); 391 spin_unlock(&dlm->spinlock); 392 } 393 394 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) 395 { 396 kref_get(&mle->mle_refs); 397 } 398 399 static void dlm_init_mle(struct dlm_master_list_entry *mle, 400 enum dlm_mle_type type, 401 struct dlm_ctxt *dlm, 402 struct dlm_lock_resource *res, 403 const char *name, 404 unsigned int namelen) 405 { 406 assert_spin_locked(&dlm->spinlock); 407 408 mle->dlm = dlm; 409 mle->type = type; 410 INIT_LIST_HEAD(&mle->list); 411 INIT_LIST_HEAD(&mle->hb_events); 412 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 413 spin_lock_init(&mle->spinlock); 414 init_waitqueue_head(&mle->wq); 415 atomic_set(&mle->woken, 0); 416 kref_init(&mle->mle_refs); 417 memset(mle->response_map, 0, sizeof(mle->response_map)); 418 mle->master = O2NM_MAX_NODES; 419 mle->new_master = O2NM_MAX_NODES; 420 mle->inuse = 0; 421 422 if (mle->type == DLM_MLE_MASTER) { 423 BUG_ON(!res); 424 mle->u.res = res; 425 } else if (mle->type == DLM_MLE_BLOCK) { 426 BUG_ON(!name); 427 memcpy(mle->u.name.name, name, namelen); 428 mle->u.name.len = namelen; 429 } else /* DLM_MLE_MIGRATION */ { 430 BUG_ON(!name); 431 memcpy(mle->u.name.name, name, namelen); 432 mle->u.name.len = namelen; 433 } 434 435 /* copy off the node_map and register hb callbacks on our copy */ 436 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); 437 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); 438 clear_bit(dlm->node_num, mle->vote_map); 439 clear_bit(dlm->node_num, mle->node_map); 440 441 /* attach the mle to the domain node up/down events */ 442 __dlm_mle_attach_hb_events(dlm, mle); 443 } 444 445 446 /* returns 1 if found, 0 if not */ 447 static int dlm_find_mle(struct dlm_ctxt *dlm, 448 struct dlm_master_list_entry **mle, 449 char *name, unsigned int namelen) 450 { 451 struct dlm_master_list_entry *tmpmle; 452 453 assert_spin_locked(&dlm->master_lock); 454 455 list_for_each_entry(tmpmle, &dlm->master_list, list) { 456 if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) 457 continue; 458 dlm_get_mle(tmpmle); 459 *mle = tmpmle; 460 return 1; 461 } 462 return 0; 463 } 464 465 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) 466 { 467 struct dlm_master_list_entry *mle; 468 469 assert_spin_locked(&dlm->spinlock); 470 471 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 472 if (node_up) 473 dlm_mle_node_up(dlm, mle, NULL, idx); 474 else 475 dlm_mle_node_down(dlm, mle, NULL, idx); 476 } 477 } 478 479 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 480 struct dlm_master_list_entry *mle, 481 struct o2nm_node *node, int idx) 482 { 483 spin_lock(&mle->spinlock); 484 485 if (!test_bit(idx, mle->node_map)) 486 mlog(0, "node %u already removed from nodemap!\n", idx); 487 else 488 clear_bit(idx, mle->node_map); 489 490 spin_unlock(&mle->spinlock); 491 } 492 493 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 494 struct dlm_master_list_entry *mle, 495 struct o2nm_node *node, int idx) 496 { 497 spin_lock(&mle->spinlock); 498 499 if (test_bit(idx, mle->node_map)) 500 mlog(0, "node %u already in node map!\n", idx); 501 else 502 set_bit(idx, mle->node_map); 503 504 spin_unlock(&mle->spinlock); 505 } 506 507 508 int dlm_init_mle_cache(void) 509 { 510 dlm_mle_cache = kmem_cache_create("dlm_mle_cache", 511 sizeof(struct dlm_master_list_entry), 512 0, SLAB_HWCACHE_ALIGN, 513 NULL); 514 if (dlm_mle_cache == NULL) 515 return -ENOMEM; 516 return 0; 517 } 518 519 void dlm_destroy_mle_cache(void) 520 { 521 if (dlm_mle_cache) 522 kmem_cache_destroy(dlm_mle_cache); 523 } 524 525 static void dlm_mle_release(struct kref *kref) 526 { 527 struct dlm_master_list_entry *mle; 528 struct dlm_ctxt *dlm; 529 530 mlog_entry_void(); 531 532 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 533 dlm = mle->dlm; 534 535 if (mle->type != DLM_MLE_MASTER) { 536 mlog(0, "calling mle_release for %.*s, type %d\n", 537 mle->u.name.len, mle->u.name.name, mle->type); 538 } else { 539 mlog(0, "calling mle_release for %.*s, type %d\n", 540 mle->u.res->lockname.len, 541 mle->u.res->lockname.name, mle->type); 542 } 543 assert_spin_locked(&dlm->spinlock); 544 assert_spin_locked(&dlm->master_lock); 545 546 /* remove from list if not already */ 547 if (!list_empty(&mle->list)) 548 list_del_init(&mle->list); 549 550 /* detach the mle from the domain node up/down events */ 551 __dlm_mle_detach_hb_events(dlm, mle); 552 553 /* NOTE: kfree under spinlock here. 554 * if this is bad, we can move this to a freelist. */ 555 kmem_cache_free(dlm_mle_cache, mle); 556 } 557 558 559 /* 560 * LOCK RESOURCE FUNCTIONS 561 */ 562 563 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm, 564 struct dlm_lock_resource *res, 565 u8 owner) 566 { 567 assert_spin_locked(&res->spinlock); 568 569 mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner); 570 571 if (owner == dlm->node_num) 572 atomic_inc(&dlm->local_resources); 573 else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN) 574 atomic_inc(&dlm->unknown_resources); 575 else 576 atomic_inc(&dlm->remote_resources); 577 578 res->owner = owner; 579 } 580 581 void dlm_change_lockres_owner(struct dlm_ctxt *dlm, 582 struct dlm_lock_resource *res, u8 owner) 583 { 584 assert_spin_locked(&res->spinlock); 585 586 if (owner == res->owner) 587 return; 588 589 if (res->owner == dlm->node_num) 590 atomic_dec(&dlm->local_resources); 591 else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) 592 atomic_dec(&dlm->unknown_resources); 593 else 594 atomic_dec(&dlm->remote_resources); 595 596 dlm_set_lockres_owner(dlm, res, owner); 597 } 598 599 600 static void dlm_lockres_release(struct kref *kref) 601 { 602 struct dlm_lock_resource *res; 603 604 res = container_of(kref, struct dlm_lock_resource, refs); 605 606 /* This should not happen -- all lockres' have a name 607 * associated with them at init time. */ 608 BUG_ON(!res->lockname.name); 609 610 mlog(0, "destroying lockres %.*s\n", res->lockname.len, 611 res->lockname.name); 612 613 if (!hlist_unhashed(&res->hash_node) || 614 !list_empty(&res->granted) || 615 !list_empty(&res->converting) || 616 !list_empty(&res->blocked) || 617 !list_empty(&res->dirty) || 618 !list_empty(&res->recovering) || 619 !list_empty(&res->purge)) { 620 mlog(ML_ERROR, 621 "Going to BUG for resource %.*s." 622 " We're on a list! [%c%c%c%c%c%c%c]\n", 623 res->lockname.len, res->lockname.name, 624 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', 625 !list_empty(&res->granted) ? 'G' : ' ', 626 !list_empty(&res->converting) ? 'C' : ' ', 627 !list_empty(&res->blocked) ? 'B' : ' ', 628 !list_empty(&res->dirty) ? 'D' : ' ', 629 !list_empty(&res->recovering) ? 'R' : ' ', 630 !list_empty(&res->purge) ? 'P' : ' '); 631 632 dlm_print_one_lock_resource(res); 633 } 634 635 /* By the time we're ready to blow this guy away, we shouldn't 636 * be on any lists. */ 637 BUG_ON(!hlist_unhashed(&res->hash_node)); 638 BUG_ON(!list_empty(&res->granted)); 639 BUG_ON(!list_empty(&res->converting)); 640 BUG_ON(!list_empty(&res->blocked)); 641 BUG_ON(!list_empty(&res->dirty)); 642 BUG_ON(!list_empty(&res->recovering)); 643 BUG_ON(!list_empty(&res->purge)); 644 645 kfree(res->lockname.name); 646 647 kfree(res); 648 } 649 650 void dlm_lockres_put(struct dlm_lock_resource *res) 651 { 652 kref_put(&res->refs, dlm_lockres_release); 653 } 654 655 static void dlm_init_lockres(struct dlm_ctxt *dlm, 656 struct dlm_lock_resource *res, 657 const char *name, unsigned int namelen) 658 { 659 char *qname; 660 661 /* If we memset here, we lose our reference to the kmalloc'd 662 * res->lockname.name, so be sure to init every field 663 * correctly! */ 664 665 qname = (char *) res->lockname.name; 666 memcpy(qname, name, namelen); 667 668 res->lockname.len = namelen; 669 res->lockname.hash = dlm_lockid_hash(name, namelen); 670 671 init_waitqueue_head(&res->wq); 672 spin_lock_init(&res->spinlock); 673 INIT_HLIST_NODE(&res->hash_node); 674 INIT_LIST_HEAD(&res->granted); 675 INIT_LIST_HEAD(&res->converting); 676 INIT_LIST_HEAD(&res->blocked); 677 INIT_LIST_HEAD(&res->dirty); 678 INIT_LIST_HEAD(&res->recovering); 679 INIT_LIST_HEAD(&res->purge); 680 atomic_set(&res->asts_reserved, 0); 681 res->migration_pending = 0; 682 res->inflight_locks = 0; 683 684 kref_init(&res->refs); 685 686 /* just for consistency */ 687 spin_lock(&res->spinlock); 688 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 689 spin_unlock(&res->spinlock); 690 691 res->state = DLM_LOCK_RES_IN_PROGRESS; 692 693 res->last_used = 0; 694 695 memset(res->lvb, 0, DLM_LVB_LEN); 696 memset(res->refmap, 0, sizeof(res->refmap)); 697 } 698 699 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, 700 const char *name, 701 unsigned int namelen) 702 { 703 struct dlm_lock_resource *res; 704 705 res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS); 706 if (!res) 707 return NULL; 708 709 res->lockname.name = kmalloc(namelen, GFP_NOFS); 710 if (!res->lockname.name) { 711 kfree(res); 712 return NULL; 713 } 714 715 dlm_init_lockres(dlm, res, name, namelen); 716 return res; 717 } 718 719 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 720 struct dlm_lock_resource *res, 721 int new_lockres, 722 const char *file, 723 int line) 724 { 725 if (!new_lockres) 726 assert_spin_locked(&res->spinlock); 727 728 if (!test_bit(dlm->node_num, res->refmap)) { 729 BUG_ON(res->inflight_locks != 0); 730 dlm_lockres_set_refmap_bit(dlm->node_num, res); 731 } 732 res->inflight_locks++; 733 mlog(0, "%s:%.*s: inflight++: now %u\n", 734 dlm->name, res->lockname.len, res->lockname.name, 735 res->inflight_locks); 736 } 737 738 void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 739 struct dlm_lock_resource *res, 740 const char *file, 741 int line) 742 { 743 assert_spin_locked(&res->spinlock); 744 745 BUG_ON(res->inflight_locks == 0); 746 res->inflight_locks--; 747 mlog(0, "%s:%.*s: inflight--: now %u\n", 748 dlm->name, res->lockname.len, res->lockname.name, 749 res->inflight_locks); 750 if (res->inflight_locks == 0) 751 dlm_lockres_clear_refmap_bit(dlm->node_num, res); 752 wake_up(&res->wq); 753 } 754 755 /* 756 * lookup a lock resource by name. 757 * may already exist in the hashtable. 758 * lockid is null terminated 759 * 760 * if not, allocate enough for the lockres and for 761 * the temporary structure used in doing the mastering. 762 * 763 * also, do a lookup in the dlm->master_list to see 764 * if another node has begun mastering the same lock. 765 * if so, there should be a block entry in there 766 * for this name, and we should *not* attempt to master 767 * the lock here. need to wait around for that node 768 * to assert_master (or die). 769 * 770 */ 771 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, 772 const char *lockid, 773 int namelen, 774 int flags) 775 { 776 struct dlm_lock_resource *tmpres=NULL, *res=NULL; 777 struct dlm_master_list_entry *mle = NULL; 778 struct dlm_master_list_entry *alloc_mle = NULL; 779 int blocked = 0; 780 int ret, nodenum; 781 struct dlm_node_iter iter; 782 unsigned int hash; 783 int tries = 0; 784 int bit, wait_on_recovery = 0; 785 int drop_inflight_if_nonlocal = 0; 786 787 BUG_ON(!lockid); 788 789 hash = dlm_lockid_hash(lockid, namelen); 790 791 mlog(0, "get lockres %s (len %d)\n", lockid, namelen); 792 793 lookup: 794 spin_lock(&dlm->spinlock); 795 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 796 if (tmpres) { 797 int dropping_ref = 0; 798 799 spin_lock(&tmpres->spinlock); 800 if (tmpres->owner == dlm->node_num) { 801 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); 802 dlm_lockres_grab_inflight_ref(dlm, tmpres); 803 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) 804 dropping_ref = 1; 805 spin_unlock(&tmpres->spinlock); 806 spin_unlock(&dlm->spinlock); 807 808 /* wait until done messaging the master, drop our ref to allow 809 * the lockres to be purged, start over. */ 810 if (dropping_ref) { 811 spin_lock(&tmpres->spinlock); 812 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); 813 spin_unlock(&tmpres->spinlock); 814 dlm_lockres_put(tmpres); 815 tmpres = NULL; 816 goto lookup; 817 } 818 819 mlog(0, "found in hash!\n"); 820 if (res) 821 dlm_lockres_put(res); 822 res = tmpres; 823 goto leave; 824 } 825 826 if (!res) { 827 spin_unlock(&dlm->spinlock); 828 mlog(0, "allocating a new resource\n"); 829 /* nothing found and we need to allocate one. */ 830 alloc_mle = (struct dlm_master_list_entry *) 831 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 832 if (!alloc_mle) 833 goto leave; 834 res = dlm_new_lockres(dlm, lockid, namelen); 835 if (!res) 836 goto leave; 837 goto lookup; 838 } 839 840 mlog(0, "no lockres found, allocated our own: %p\n", res); 841 842 if (flags & LKM_LOCAL) { 843 /* caller knows it's safe to assume it's not mastered elsewhere 844 * DONE! return right away */ 845 spin_lock(&res->spinlock); 846 dlm_change_lockres_owner(dlm, res, dlm->node_num); 847 __dlm_insert_lockres(dlm, res); 848 dlm_lockres_grab_inflight_ref(dlm, res); 849 spin_unlock(&res->spinlock); 850 spin_unlock(&dlm->spinlock); 851 /* lockres still marked IN_PROGRESS */ 852 goto wake_waiters; 853 } 854 855 /* check master list to see if another node has started mastering it */ 856 spin_lock(&dlm->master_lock); 857 858 /* if we found a block, wait for lock to be mastered by another node */ 859 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); 860 if (blocked) { 861 int mig; 862 if (mle->type == DLM_MLE_MASTER) { 863 mlog(ML_ERROR, "master entry for nonexistent lock!\n"); 864 BUG(); 865 } 866 mig = (mle->type == DLM_MLE_MIGRATION); 867 /* if there is a migration in progress, let the migration 868 * finish before continuing. we can wait for the absence 869 * of the MIGRATION mle: either the migrate finished or 870 * one of the nodes died and the mle was cleaned up. 871 * if there is a BLOCK here, but it already has a master 872 * set, we are too late. the master does not have a ref 873 * for us in the refmap. detach the mle and drop it. 874 * either way, go back to the top and start over. */ 875 if (mig || mle->master != O2NM_MAX_NODES) { 876 BUG_ON(mig && mle->master == dlm->node_num); 877 /* we arrived too late. the master does not 878 * have a ref for us. retry. */ 879 mlog(0, "%s:%.*s: late on %s\n", 880 dlm->name, namelen, lockid, 881 mig ? "MIGRATION" : "BLOCK"); 882 spin_unlock(&dlm->master_lock); 883 spin_unlock(&dlm->spinlock); 884 885 /* master is known, detach */ 886 if (!mig) 887 dlm_mle_detach_hb_events(dlm, mle); 888 dlm_put_mle(mle); 889 mle = NULL; 890 /* this is lame, but we cant wait on either 891 * the mle or lockres waitqueue here */ 892 if (mig) 893 msleep(100); 894 goto lookup; 895 } 896 } else { 897 /* go ahead and try to master lock on this node */ 898 mle = alloc_mle; 899 /* make sure this does not get freed below */ 900 alloc_mle = NULL; 901 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); 902 set_bit(dlm->node_num, mle->maybe_map); 903 list_add(&mle->list, &dlm->master_list); 904 905 /* still holding the dlm spinlock, check the recovery map 906 * to see if there are any nodes that still need to be 907 * considered. these will not appear in the mle nodemap 908 * but they might own this lockres. wait on them. */ 909 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 910 if (bit < O2NM_MAX_NODES) { 911 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 912 "recover before lock mastery can begin\n", 913 dlm->name, namelen, (char *)lockid, bit); 914 wait_on_recovery = 1; 915 } 916 } 917 918 /* at this point there is either a DLM_MLE_BLOCK or a 919 * DLM_MLE_MASTER on the master list, so it's safe to add the 920 * lockres to the hashtable. anyone who finds the lock will 921 * still have to wait on the IN_PROGRESS. */ 922 923 /* finally add the lockres to its hash bucket */ 924 __dlm_insert_lockres(dlm, res); 925 /* since this lockres is new it doesnt not require the spinlock */ 926 dlm_lockres_grab_inflight_ref_new(dlm, res); 927 928 /* if this node does not become the master make sure to drop 929 * this inflight reference below */ 930 drop_inflight_if_nonlocal = 1; 931 932 /* get an extra ref on the mle in case this is a BLOCK 933 * if so, the creator of the BLOCK may try to put the last 934 * ref at this time in the assert master handler, so we 935 * need an extra one to keep from a bad ptr deref. */ 936 dlm_get_mle_inuse(mle); 937 spin_unlock(&dlm->master_lock); 938 spin_unlock(&dlm->spinlock); 939 940 redo_request: 941 while (wait_on_recovery) { 942 /* any cluster changes that occurred after dropping the 943 * dlm spinlock would be detectable be a change on the mle, 944 * so we only need to clear out the recovery map once. */ 945 if (dlm_is_recovery_lock(lockid, namelen)) { 946 mlog(ML_NOTICE, "%s: recovery map is not empty, but " 947 "must master $RECOVERY lock now\n", dlm->name); 948 if (!dlm_pre_master_reco_lockres(dlm, res)) 949 wait_on_recovery = 0; 950 else { 951 mlog(0, "%s: waiting 500ms for heartbeat state " 952 "change\n", dlm->name); 953 msleep(500); 954 } 955 continue; 956 } 957 958 dlm_kick_recovery_thread(dlm); 959 msleep(1000); 960 dlm_wait_for_recovery(dlm); 961 962 spin_lock(&dlm->spinlock); 963 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 964 if (bit < O2NM_MAX_NODES) { 965 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 966 "recover before lock mastery can begin\n", 967 dlm->name, namelen, (char *)lockid, bit); 968 wait_on_recovery = 1; 969 } else 970 wait_on_recovery = 0; 971 spin_unlock(&dlm->spinlock); 972 973 if (wait_on_recovery) 974 dlm_wait_for_node_recovery(dlm, bit, 10000); 975 } 976 977 /* must wait for lock to be mastered elsewhere */ 978 if (blocked) 979 goto wait; 980 981 ret = -EINVAL; 982 dlm_node_iter_init(mle->vote_map, &iter); 983 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 984 ret = dlm_do_master_request(res, mle, nodenum); 985 if (ret < 0) 986 mlog_errno(ret); 987 if (mle->master != O2NM_MAX_NODES) { 988 /* found a master ! */ 989 if (mle->master <= nodenum) 990 break; 991 /* if our master request has not reached the master 992 * yet, keep going until it does. this is how the 993 * master will know that asserts are needed back to 994 * the lower nodes. */ 995 mlog(0, "%s:%.*s: requests only up to %u but master " 996 "is %u, keep going\n", dlm->name, namelen, 997 lockid, nodenum, mle->master); 998 } 999 } 1000 1001 wait: 1002 /* keep going until the response map includes all nodes */ 1003 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 1004 if (ret < 0) { 1005 wait_on_recovery = 1; 1006 mlog(0, "%s:%.*s: node map changed, redo the " 1007 "master request now, blocked=%d\n", 1008 dlm->name, res->lockname.len, 1009 res->lockname.name, blocked); 1010 if (++tries > 20) { 1011 mlog(ML_ERROR, "%s:%.*s: spinning on " 1012 "dlm_wait_for_lock_mastery, blocked=%d\n", 1013 dlm->name, res->lockname.len, 1014 res->lockname.name, blocked); 1015 dlm_print_one_lock_resource(res); 1016 dlm_print_one_mle(mle); 1017 tries = 0; 1018 } 1019 goto redo_request; 1020 } 1021 1022 mlog(0, "lockres mastered by %u\n", res->owner); 1023 /* make sure we never continue without this */ 1024 BUG_ON(res->owner == O2NM_MAX_NODES); 1025 1026 /* master is known, detach if not already detached */ 1027 dlm_mle_detach_hb_events(dlm, mle); 1028 dlm_put_mle(mle); 1029 /* put the extra ref */ 1030 dlm_put_mle_inuse(mle); 1031 1032 wake_waiters: 1033 spin_lock(&res->spinlock); 1034 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal) 1035 dlm_lockres_drop_inflight_ref(dlm, res); 1036 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1037 spin_unlock(&res->spinlock); 1038 wake_up(&res->wq); 1039 1040 leave: 1041 /* need to free the unused mle */ 1042 if (alloc_mle) 1043 kmem_cache_free(dlm_mle_cache, alloc_mle); 1044 1045 return res; 1046 } 1047 1048 1049 #define DLM_MASTERY_TIMEOUT_MS 5000 1050 1051 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 1052 struct dlm_lock_resource *res, 1053 struct dlm_master_list_entry *mle, 1054 int *blocked) 1055 { 1056 u8 m; 1057 int ret, bit; 1058 int map_changed, voting_done; 1059 int assert, sleep; 1060 1061 recheck: 1062 ret = 0; 1063 assert = 0; 1064 1065 /* check if another node has already become the owner */ 1066 spin_lock(&res->spinlock); 1067 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1068 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, 1069 res->lockname.len, res->lockname.name, res->owner); 1070 spin_unlock(&res->spinlock); 1071 /* this will cause the master to re-assert across 1072 * the whole cluster, freeing up mles */ 1073 if (res->owner != dlm->node_num) { 1074 ret = dlm_do_master_request(res, mle, res->owner); 1075 if (ret < 0) { 1076 /* give recovery a chance to run */ 1077 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1078 msleep(500); 1079 goto recheck; 1080 } 1081 } 1082 ret = 0; 1083 goto leave; 1084 } 1085 spin_unlock(&res->spinlock); 1086 1087 spin_lock(&mle->spinlock); 1088 m = mle->master; 1089 map_changed = (memcmp(mle->vote_map, mle->node_map, 1090 sizeof(mle->vote_map)) != 0); 1091 voting_done = (memcmp(mle->vote_map, mle->response_map, 1092 sizeof(mle->vote_map)) == 0); 1093 1094 /* restart if we hit any errors */ 1095 if (map_changed) { 1096 int b; 1097 mlog(0, "%s: %.*s: node map changed, restarting\n", 1098 dlm->name, res->lockname.len, res->lockname.name); 1099 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1100 b = (mle->type == DLM_MLE_BLOCK); 1101 if ((*blocked && !b) || (!*blocked && b)) { 1102 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1103 dlm->name, res->lockname.len, res->lockname.name, 1104 *blocked, b); 1105 *blocked = b; 1106 } 1107 spin_unlock(&mle->spinlock); 1108 if (ret < 0) { 1109 mlog_errno(ret); 1110 goto leave; 1111 } 1112 mlog(0, "%s:%.*s: restart lock mastery succeeded, " 1113 "rechecking now\n", dlm->name, res->lockname.len, 1114 res->lockname.name); 1115 goto recheck; 1116 } else { 1117 if (!voting_done) { 1118 mlog(0, "map not changed and voting not done " 1119 "for %s:%.*s\n", dlm->name, res->lockname.len, 1120 res->lockname.name); 1121 } 1122 } 1123 1124 if (m != O2NM_MAX_NODES) { 1125 /* another node has done an assert! 1126 * all done! */ 1127 sleep = 0; 1128 } else { 1129 sleep = 1; 1130 /* have all nodes responded? */ 1131 if (voting_done && !*blocked) { 1132 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 1133 if (dlm->node_num <= bit) { 1134 /* my node number is lowest. 1135 * now tell other nodes that I am 1136 * mastering this. */ 1137 mle->master = dlm->node_num; 1138 /* ref was grabbed in get_lock_resource 1139 * will be dropped in dlmlock_master */ 1140 assert = 1; 1141 sleep = 0; 1142 } 1143 /* if voting is done, but we have not received 1144 * an assert master yet, we must sleep */ 1145 } 1146 } 1147 1148 spin_unlock(&mle->spinlock); 1149 1150 /* sleep if we haven't finished voting yet */ 1151 if (sleep) { 1152 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); 1153 1154 /* 1155 if (atomic_read(&mle->mle_refs.refcount) < 2) 1156 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, 1157 atomic_read(&mle->mle_refs.refcount), 1158 res->lockname.len, res->lockname.name); 1159 */ 1160 atomic_set(&mle->woken, 0); 1161 (void)wait_event_timeout(mle->wq, 1162 (atomic_read(&mle->woken) == 1), 1163 timeo); 1164 if (res->owner == O2NM_MAX_NODES) { 1165 mlog(0, "%s:%.*s: waiting again\n", dlm->name, 1166 res->lockname.len, res->lockname.name); 1167 goto recheck; 1168 } 1169 mlog(0, "done waiting, master is %u\n", res->owner); 1170 ret = 0; 1171 goto leave; 1172 } 1173 1174 ret = 0; /* done */ 1175 if (assert) { 1176 m = dlm->node_num; 1177 mlog(0, "about to master %.*s here, this=%u\n", 1178 res->lockname.len, res->lockname.name, m); 1179 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); 1180 if (ret) { 1181 /* This is a failure in the network path, 1182 * not in the response to the assert_master 1183 * (any nonzero response is a BUG on this node). 1184 * Most likely a socket just got disconnected 1185 * due to node death. */ 1186 mlog_errno(ret); 1187 } 1188 /* no longer need to restart lock mastery. 1189 * all living nodes have been contacted. */ 1190 ret = 0; 1191 } 1192 1193 /* set the lockres owner */ 1194 spin_lock(&res->spinlock); 1195 /* mastery reference obtained either during 1196 * assert_master_handler or in get_lock_resource */ 1197 dlm_change_lockres_owner(dlm, res, m); 1198 spin_unlock(&res->spinlock); 1199 1200 leave: 1201 return ret; 1202 } 1203 1204 struct dlm_bitmap_diff_iter 1205 { 1206 int curnode; 1207 unsigned long *orig_bm; 1208 unsigned long *cur_bm; 1209 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1210 }; 1211 1212 enum dlm_node_state_change 1213 { 1214 NODE_DOWN = -1, 1215 NODE_NO_CHANGE = 0, 1216 NODE_UP 1217 }; 1218 1219 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, 1220 unsigned long *orig_bm, 1221 unsigned long *cur_bm) 1222 { 1223 unsigned long p1, p2; 1224 int i; 1225 1226 iter->curnode = -1; 1227 iter->orig_bm = orig_bm; 1228 iter->cur_bm = cur_bm; 1229 1230 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { 1231 p1 = *(iter->orig_bm + i); 1232 p2 = *(iter->cur_bm + i); 1233 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); 1234 } 1235 } 1236 1237 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, 1238 enum dlm_node_state_change *state) 1239 { 1240 int bit; 1241 1242 if (iter->curnode >= O2NM_MAX_NODES) 1243 return -ENOENT; 1244 1245 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, 1246 iter->curnode+1); 1247 if (bit >= O2NM_MAX_NODES) { 1248 iter->curnode = O2NM_MAX_NODES; 1249 return -ENOENT; 1250 } 1251 1252 /* if it was there in the original then this node died */ 1253 if (test_bit(bit, iter->orig_bm)) 1254 *state = NODE_DOWN; 1255 else 1256 *state = NODE_UP; 1257 1258 iter->curnode = bit; 1259 return bit; 1260 } 1261 1262 1263 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 1264 struct dlm_lock_resource *res, 1265 struct dlm_master_list_entry *mle, 1266 int blocked) 1267 { 1268 struct dlm_bitmap_diff_iter bdi; 1269 enum dlm_node_state_change sc; 1270 int node; 1271 int ret = 0; 1272 1273 mlog(0, "something happened such that the " 1274 "master process may need to be restarted!\n"); 1275 1276 assert_spin_locked(&mle->spinlock); 1277 1278 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); 1279 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1280 while (node >= 0) { 1281 if (sc == NODE_UP) { 1282 /* a node came up. clear any old vote from 1283 * the response map and set it in the vote map 1284 * then restart the mastery. */ 1285 mlog(ML_NOTICE, "node %d up while restarting\n", node); 1286 1287 /* redo the master request, but only for the new node */ 1288 mlog(0, "sending request to new node\n"); 1289 clear_bit(node, mle->response_map); 1290 set_bit(node, mle->vote_map); 1291 } else { 1292 mlog(ML_ERROR, "node down! %d\n", node); 1293 if (blocked) { 1294 int lowest = find_next_bit(mle->maybe_map, 1295 O2NM_MAX_NODES, 0); 1296 1297 /* act like it was never there */ 1298 clear_bit(node, mle->maybe_map); 1299 1300 if (node == lowest) { 1301 mlog(0, "expected master %u died" 1302 " while this node was blocked " 1303 "waiting on it!\n", node); 1304 lowest = find_next_bit(mle->maybe_map, 1305 O2NM_MAX_NODES, 1306 lowest+1); 1307 if (lowest < O2NM_MAX_NODES) { 1308 mlog(0, "%s:%.*s:still " 1309 "blocked. waiting on %u " 1310 "now\n", dlm->name, 1311 res->lockname.len, 1312 res->lockname.name, 1313 lowest); 1314 } else { 1315 /* mle is an MLE_BLOCK, but 1316 * there is now nothing left to 1317 * block on. we need to return 1318 * all the way back out and try 1319 * again with an MLE_MASTER. 1320 * dlm_do_local_recovery_cleanup 1321 * has already run, so the mle 1322 * refcount is ok */ 1323 mlog(0, "%s:%.*s: no " 1324 "longer blocking. try to " 1325 "master this here\n", 1326 dlm->name, 1327 res->lockname.len, 1328 res->lockname.name); 1329 mle->type = DLM_MLE_MASTER; 1330 mle->u.res = res; 1331 } 1332 } 1333 } 1334 1335 /* now blank out everything, as if we had never 1336 * contacted anyone */ 1337 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); 1338 memset(mle->response_map, 0, sizeof(mle->response_map)); 1339 /* reset the vote_map to the current node_map */ 1340 memcpy(mle->vote_map, mle->node_map, 1341 sizeof(mle->node_map)); 1342 /* put myself into the maybe map */ 1343 if (mle->type != DLM_MLE_BLOCK) 1344 set_bit(dlm->node_num, mle->maybe_map); 1345 } 1346 ret = -EAGAIN; 1347 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1348 } 1349 return ret; 1350 } 1351 1352 1353 /* 1354 * DLM_MASTER_REQUEST_MSG 1355 * 1356 * returns: 0 on success, 1357 * -errno on a network error 1358 * 1359 * on error, the caller should assume the target node is "dead" 1360 * 1361 */ 1362 1363 static int dlm_do_master_request(struct dlm_lock_resource *res, 1364 struct dlm_master_list_entry *mle, int to) 1365 { 1366 struct dlm_ctxt *dlm = mle->dlm; 1367 struct dlm_master_request request; 1368 int ret, response=0, resend; 1369 1370 memset(&request, 0, sizeof(request)); 1371 request.node_idx = dlm->node_num; 1372 1373 BUG_ON(mle->type == DLM_MLE_MIGRATION); 1374 1375 if (mle->type != DLM_MLE_MASTER) { 1376 request.namelen = mle->u.name.len; 1377 memcpy(request.name, mle->u.name.name, request.namelen); 1378 } else { 1379 request.namelen = mle->u.res->lockname.len; 1380 memcpy(request.name, mle->u.res->lockname.name, 1381 request.namelen); 1382 } 1383 1384 again: 1385 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, 1386 sizeof(request), to, &response); 1387 if (ret < 0) { 1388 if (ret == -ESRCH) { 1389 /* should never happen */ 1390 mlog(ML_ERROR, "TCP stack not ready!\n"); 1391 BUG(); 1392 } else if (ret == -EINVAL) { 1393 mlog(ML_ERROR, "bad args passed to o2net!\n"); 1394 BUG(); 1395 } else if (ret == -ENOMEM) { 1396 mlog(ML_ERROR, "out of memory while trying to send " 1397 "network message! retrying\n"); 1398 /* this is totally crude */ 1399 msleep(50); 1400 goto again; 1401 } else if (!dlm_is_host_down(ret)) { 1402 /* not a network error. bad. */ 1403 mlog_errno(ret); 1404 mlog(ML_ERROR, "unhandled error!"); 1405 BUG(); 1406 } 1407 /* all other errors should be network errors, 1408 * and likely indicate node death */ 1409 mlog(ML_ERROR, "link to %d went down!\n", to); 1410 goto out; 1411 } 1412 1413 ret = 0; 1414 resend = 0; 1415 spin_lock(&mle->spinlock); 1416 switch (response) { 1417 case DLM_MASTER_RESP_YES: 1418 set_bit(to, mle->response_map); 1419 mlog(0, "node %u is the master, response=YES\n", to); 1420 mlog(0, "%s:%.*s: master node %u now knows I have a " 1421 "reference\n", dlm->name, res->lockname.len, 1422 res->lockname.name, to); 1423 mle->master = to; 1424 break; 1425 case DLM_MASTER_RESP_NO: 1426 mlog(0, "node %u not master, response=NO\n", to); 1427 set_bit(to, mle->response_map); 1428 break; 1429 case DLM_MASTER_RESP_MAYBE: 1430 mlog(0, "node %u not master, response=MAYBE\n", to); 1431 set_bit(to, mle->response_map); 1432 set_bit(to, mle->maybe_map); 1433 break; 1434 case DLM_MASTER_RESP_ERROR: 1435 mlog(0, "node %u hit an error, resending\n", to); 1436 resend = 1; 1437 response = 0; 1438 break; 1439 default: 1440 mlog(ML_ERROR, "bad response! %u\n", response); 1441 BUG(); 1442 } 1443 spin_unlock(&mle->spinlock); 1444 if (resend) { 1445 /* this is also totally crude */ 1446 msleep(50); 1447 goto again; 1448 } 1449 1450 out: 1451 return ret; 1452 } 1453 1454 /* 1455 * locks that can be taken here: 1456 * dlm->spinlock 1457 * res->spinlock 1458 * mle->spinlock 1459 * dlm->master_list 1460 * 1461 * if possible, TRIM THIS DOWN!!! 1462 */ 1463 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, 1464 void **ret_data) 1465 { 1466 u8 response = DLM_MASTER_RESP_MAYBE; 1467 struct dlm_ctxt *dlm = data; 1468 struct dlm_lock_resource *res = NULL; 1469 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1470 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1471 char *name; 1472 unsigned int namelen, hash; 1473 int found, ret; 1474 int set_maybe; 1475 int dispatch_assert = 0; 1476 1477 if (!dlm_grab(dlm)) 1478 return DLM_MASTER_RESP_NO; 1479 1480 if (!dlm_domain_fully_joined(dlm)) { 1481 response = DLM_MASTER_RESP_NO; 1482 goto send_response; 1483 } 1484 1485 name = request->name; 1486 namelen = request->namelen; 1487 hash = dlm_lockid_hash(name, namelen); 1488 1489 if (namelen > DLM_LOCKID_NAME_MAX) { 1490 response = DLM_IVBUFLEN; 1491 goto send_response; 1492 } 1493 1494 way_up_top: 1495 spin_lock(&dlm->spinlock); 1496 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1497 if (res) { 1498 spin_unlock(&dlm->spinlock); 1499 1500 /* take care of the easy cases up front */ 1501 spin_lock(&res->spinlock); 1502 if (res->state & (DLM_LOCK_RES_RECOVERING| 1503 DLM_LOCK_RES_MIGRATING)) { 1504 spin_unlock(&res->spinlock); 1505 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1506 "being recovered/migrated\n"); 1507 response = DLM_MASTER_RESP_ERROR; 1508 if (mle) 1509 kmem_cache_free(dlm_mle_cache, mle); 1510 goto send_response; 1511 } 1512 1513 if (res->owner == dlm->node_num) { 1514 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1515 dlm->name, namelen, name, request->node_idx); 1516 dlm_lockres_set_refmap_bit(request->node_idx, res); 1517 spin_unlock(&res->spinlock); 1518 response = DLM_MASTER_RESP_YES; 1519 if (mle) 1520 kmem_cache_free(dlm_mle_cache, mle); 1521 1522 /* this node is the owner. 1523 * there is some extra work that needs to 1524 * happen now. the requesting node has 1525 * caused all nodes up to this one to 1526 * create mles. this node now needs to 1527 * go back and clean those up. */ 1528 dispatch_assert = 1; 1529 goto send_response; 1530 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1531 spin_unlock(&res->spinlock); 1532 // mlog(0, "node %u is the master\n", res->owner); 1533 response = DLM_MASTER_RESP_NO; 1534 if (mle) 1535 kmem_cache_free(dlm_mle_cache, mle); 1536 goto send_response; 1537 } 1538 1539 /* ok, there is no owner. either this node is 1540 * being blocked, or it is actively trying to 1541 * master this lock. */ 1542 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1543 mlog(ML_ERROR, "lock with no owner should be " 1544 "in-progress!\n"); 1545 BUG(); 1546 } 1547 1548 // mlog(0, "lockres is in progress...\n"); 1549 spin_lock(&dlm->master_lock); 1550 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1551 if (!found) { 1552 mlog(ML_ERROR, "no mle found for this lock!\n"); 1553 BUG(); 1554 } 1555 set_maybe = 1; 1556 spin_lock(&tmpmle->spinlock); 1557 if (tmpmle->type == DLM_MLE_BLOCK) { 1558 // mlog(0, "this node is waiting for " 1559 // "lockres to be mastered\n"); 1560 response = DLM_MASTER_RESP_NO; 1561 } else if (tmpmle->type == DLM_MLE_MIGRATION) { 1562 mlog(0, "node %u is master, but trying to migrate to " 1563 "node %u.\n", tmpmle->master, tmpmle->new_master); 1564 if (tmpmle->master == dlm->node_num) { 1565 mlog(ML_ERROR, "no owner on lockres, but this " 1566 "node is trying to migrate it to %u?!\n", 1567 tmpmle->new_master); 1568 BUG(); 1569 } else { 1570 /* the real master can respond on its own */ 1571 response = DLM_MASTER_RESP_NO; 1572 } 1573 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1574 set_maybe = 0; 1575 if (tmpmle->master == dlm->node_num) { 1576 response = DLM_MASTER_RESP_YES; 1577 /* this node will be the owner. 1578 * go back and clean the mles on any 1579 * other nodes */ 1580 dispatch_assert = 1; 1581 dlm_lockres_set_refmap_bit(request->node_idx, res); 1582 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1583 dlm->name, namelen, name, 1584 request->node_idx); 1585 } else 1586 response = DLM_MASTER_RESP_NO; 1587 } else { 1588 // mlog(0, "this node is attempting to " 1589 // "master lockres\n"); 1590 response = DLM_MASTER_RESP_MAYBE; 1591 } 1592 if (set_maybe) 1593 set_bit(request->node_idx, tmpmle->maybe_map); 1594 spin_unlock(&tmpmle->spinlock); 1595 1596 spin_unlock(&dlm->master_lock); 1597 spin_unlock(&res->spinlock); 1598 1599 /* keep the mle attached to heartbeat events */ 1600 dlm_put_mle(tmpmle); 1601 if (mle) 1602 kmem_cache_free(dlm_mle_cache, mle); 1603 goto send_response; 1604 } 1605 1606 /* 1607 * lockres doesn't exist on this node 1608 * if there is an MLE_BLOCK, return NO 1609 * if there is an MLE_MASTER, return MAYBE 1610 * otherwise, add an MLE_BLOCK, return NO 1611 */ 1612 spin_lock(&dlm->master_lock); 1613 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1614 if (!found) { 1615 /* this lockid has never been seen on this node yet */ 1616 // mlog(0, "no mle found\n"); 1617 if (!mle) { 1618 spin_unlock(&dlm->master_lock); 1619 spin_unlock(&dlm->spinlock); 1620 1621 mle = (struct dlm_master_list_entry *) 1622 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 1623 if (!mle) { 1624 response = DLM_MASTER_RESP_ERROR; 1625 mlog_errno(-ENOMEM); 1626 goto send_response; 1627 } 1628 goto way_up_top; 1629 } 1630 1631 // mlog(0, "this is second time thru, already allocated, " 1632 // "add the block.\n"); 1633 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); 1634 set_bit(request->node_idx, mle->maybe_map); 1635 list_add(&mle->list, &dlm->master_list); 1636 response = DLM_MASTER_RESP_NO; 1637 } else { 1638 // mlog(0, "mle was found\n"); 1639 set_maybe = 1; 1640 spin_lock(&tmpmle->spinlock); 1641 if (tmpmle->master == dlm->node_num) { 1642 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); 1643 BUG(); 1644 } 1645 if (tmpmle->type == DLM_MLE_BLOCK) 1646 response = DLM_MASTER_RESP_NO; 1647 else if (tmpmle->type == DLM_MLE_MIGRATION) { 1648 mlog(0, "migration mle was found (%u->%u)\n", 1649 tmpmle->master, tmpmle->new_master); 1650 /* real master can respond on its own */ 1651 response = DLM_MASTER_RESP_NO; 1652 } else 1653 response = DLM_MASTER_RESP_MAYBE; 1654 if (set_maybe) 1655 set_bit(request->node_idx, tmpmle->maybe_map); 1656 spin_unlock(&tmpmle->spinlock); 1657 } 1658 spin_unlock(&dlm->master_lock); 1659 spin_unlock(&dlm->spinlock); 1660 1661 if (found) { 1662 /* keep the mle attached to heartbeat events */ 1663 dlm_put_mle(tmpmle); 1664 } 1665 send_response: 1666 /* 1667 * __dlm_lookup_lockres() grabbed a reference to this lockres. 1668 * The reference is released by dlm_assert_master_worker() under 1669 * the call to dlm_dispatch_assert_master(). If 1670 * dlm_assert_master_worker() isn't called, we drop it here. 1671 */ 1672 if (dispatch_assert) { 1673 if (response != DLM_MASTER_RESP_YES) 1674 mlog(ML_ERROR, "invalid response %d\n", response); 1675 if (!res) { 1676 mlog(ML_ERROR, "bad lockres while trying to assert!\n"); 1677 BUG(); 1678 } 1679 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1680 dlm->node_num, res->lockname.len, res->lockname.name); 1681 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1682 DLM_ASSERT_MASTER_MLE_CLEANUP); 1683 if (ret < 0) { 1684 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1685 response = DLM_MASTER_RESP_ERROR; 1686 dlm_lockres_put(res); 1687 } 1688 } else { 1689 if (res) 1690 dlm_lockres_put(res); 1691 } 1692 1693 dlm_put(dlm); 1694 return response; 1695 } 1696 1697 /* 1698 * DLM_ASSERT_MASTER_MSG 1699 */ 1700 1701 1702 /* 1703 * NOTE: this can be used for debugging 1704 * can periodically run all locks owned by this node 1705 * and re-assert across the cluster... 1706 */ 1707 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 1708 struct dlm_lock_resource *res, 1709 void *nodemap, u32 flags) 1710 { 1711 struct dlm_assert_master assert; 1712 int to, tmpret; 1713 struct dlm_node_iter iter; 1714 int ret = 0; 1715 int reassert; 1716 const char *lockname = res->lockname.name; 1717 unsigned int namelen = res->lockname.len; 1718 1719 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 1720 1721 spin_lock(&res->spinlock); 1722 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1723 spin_unlock(&res->spinlock); 1724 1725 again: 1726 reassert = 0; 1727 1728 /* note that if this nodemap is empty, it returns 0 */ 1729 dlm_node_iter_init(nodemap, &iter); 1730 while ((to = dlm_node_iter_next(&iter)) >= 0) { 1731 int r = 0; 1732 struct dlm_master_list_entry *mle = NULL; 1733 1734 mlog(0, "sending assert master to %d (%.*s)\n", to, 1735 namelen, lockname); 1736 memset(&assert, 0, sizeof(assert)); 1737 assert.node_idx = dlm->node_num; 1738 assert.namelen = namelen; 1739 memcpy(assert.name, lockname, namelen); 1740 assert.flags = cpu_to_be32(flags); 1741 1742 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 1743 &assert, sizeof(assert), to, &r); 1744 if (tmpret < 0) { 1745 mlog(0, "assert_master returned %d!\n", tmpret); 1746 if (!dlm_is_host_down(tmpret)) { 1747 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); 1748 BUG(); 1749 } 1750 /* a node died. finish out the rest of the nodes. */ 1751 mlog(0, "link to %d went down!\n", to); 1752 /* any nonzero status return will do */ 1753 ret = tmpret; 1754 r = 0; 1755 } else if (r < 0) { 1756 /* ok, something horribly messed. kill thyself. */ 1757 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1758 "got %d.\n", namelen, lockname, to, r); 1759 spin_lock(&dlm->spinlock); 1760 spin_lock(&dlm->master_lock); 1761 if (dlm_find_mle(dlm, &mle, (char *)lockname, 1762 namelen)) { 1763 dlm_print_one_mle(mle); 1764 __dlm_put_mle(mle); 1765 } 1766 spin_unlock(&dlm->master_lock); 1767 spin_unlock(&dlm->spinlock); 1768 BUG(); 1769 } 1770 1771 if (r & DLM_ASSERT_RESPONSE_REASSERT && 1772 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { 1773 mlog(ML_ERROR, "%.*s: very strange, " 1774 "master MLE but no lockres on %u\n", 1775 namelen, lockname, to); 1776 } 1777 1778 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1779 mlog(0, "%.*s: node %u create mles on other " 1780 "nodes and requests a re-assert\n", 1781 namelen, lockname, to); 1782 reassert = 1; 1783 } 1784 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { 1785 mlog(0, "%.*s: node %u has a reference to this " 1786 "lockres, set the bit in the refmap\n", 1787 namelen, lockname, to); 1788 spin_lock(&res->spinlock); 1789 dlm_lockres_set_refmap_bit(to, res); 1790 spin_unlock(&res->spinlock); 1791 } 1792 } 1793 1794 if (reassert) 1795 goto again; 1796 1797 spin_lock(&res->spinlock); 1798 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 1799 spin_unlock(&res->spinlock); 1800 wake_up(&res->wq); 1801 1802 return ret; 1803 } 1804 1805 /* 1806 * locks that can be taken here: 1807 * dlm->spinlock 1808 * res->spinlock 1809 * mle->spinlock 1810 * dlm->master_list 1811 * 1812 * if possible, TRIM THIS DOWN!!! 1813 */ 1814 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, 1815 void **ret_data) 1816 { 1817 struct dlm_ctxt *dlm = data; 1818 struct dlm_master_list_entry *mle = NULL; 1819 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; 1820 struct dlm_lock_resource *res = NULL; 1821 char *name; 1822 unsigned int namelen, hash; 1823 u32 flags; 1824 int master_request = 0, have_lockres_ref = 0; 1825 int ret = 0; 1826 1827 if (!dlm_grab(dlm)) 1828 return 0; 1829 1830 name = assert->name; 1831 namelen = assert->namelen; 1832 hash = dlm_lockid_hash(name, namelen); 1833 flags = be32_to_cpu(assert->flags); 1834 1835 if (namelen > DLM_LOCKID_NAME_MAX) { 1836 mlog(ML_ERROR, "Invalid name length!"); 1837 goto done; 1838 } 1839 1840 spin_lock(&dlm->spinlock); 1841 1842 if (flags) 1843 mlog(0, "assert_master with flags: %u\n", flags); 1844 1845 /* find the MLE */ 1846 spin_lock(&dlm->master_lock); 1847 if (!dlm_find_mle(dlm, &mle, name, namelen)) { 1848 /* not an error, could be master just re-asserting */ 1849 mlog(0, "just got an assert_master from %u, but no " 1850 "MLE for it! (%.*s)\n", assert->node_idx, 1851 namelen, name); 1852 } else { 1853 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); 1854 if (bit >= O2NM_MAX_NODES) { 1855 /* not necessarily an error, though less likely. 1856 * could be master just re-asserting. */ 1857 mlog(0, "no bits set in the maybe_map, but %u " 1858 "is asserting! (%.*s)\n", assert->node_idx, 1859 namelen, name); 1860 } else if (bit != assert->node_idx) { 1861 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1862 mlog(0, "master %u was found, %u should " 1863 "back off\n", assert->node_idx, bit); 1864 } else { 1865 /* with the fix for bug 569, a higher node 1866 * number winning the mastery will respond 1867 * YES to mastery requests, but this node 1868 * had no way of knowing. let it pass. */ 1869 mlog(0, "%u is the lowest node, " 1870 "%u is asserting. (%.*s) %u must " 1871 "have begun after %u won.\n", bit, 1872 assert->node_idx, namelen, name, bit, 1873 assert->node_idx); 1874 } 1875 } 1876 if (mle->type == DLM_MLE_MIGRATION) { 1877 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1878 mlog(0, "%s:%.*s: got cleanup assert" 1879 " from %u for migration\n", 1880 dlm->name, namelen, name, 1881 assert->node_idx); 1882 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { 1883 mlog(0, "%s:%.*s: got unrelated assert" 1884 " from %u for migration, ignoring\n", 1885 dlm->name, namelen, name, 1886 assert->node_idx); 1887 __dlm_put_mle(mle); 1888 spin_unlock(&dlm->master_lock); 1889 spin_unlock(&dlm->spinlock); 1890 goto done; 1891 } 1892 } 1893 } 1894 spin_unlock(&dlm->master_lock); 1895 1896 /* ok everything checks out with the MLE 1897 * now check to see if there is a lockres */ 1898 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1899 if (res) { 1900 spin_lock(&res->spinlock); 1901 if (res->state & DLM_LOCK_RES_RECOVERING) { 1902 mlog(ML_ERROR, "%u asserting but %.*s is " 1903 "RECOVERING!\n", assert->node_idx, namelen, name); 1904 goto kill; 1905 } 1906 if (!mle) { 1907 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && 1908 res->owner != assert->node_idx) { 1909 mlog(ML_ERROR, "assert_master from " 1910 "%u, but current owner is " 1911 "%u! (%.*s)\n", 1912 assert->node_idx, res->owner, 1913 namelen, name); 1914 goto kill; 1915 } 1916 } else if (mle->type != DLM_MLE_MIGRATION) { 1917 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1918 /* owner is just re-asserting */ 1919 if (res->owner == assert->node_idx) { 1920 mlog(0, "owner %u re-asserting on " 1921 "lock %.*s\n", assert->node_idx, 1922 namelen, name); 1923 goto ok; 1924 } 1925 mlog(ML_ERROR, "got assert_master from " 1926 "node %u, but %u is the owner! " 1927 "(%.*s)\n", assert->node_idx, 1928 res->owner, namelen, name); 1929 goto kill; 1930 } 1931 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1932 mlog(ML_ERROR, "got assert from %u, but lock " 1933 "with no owner should be " 1934 "in-progress! (%.*s)\n", 1935 assert->node_idx, 1936 namelen, name); 1937 goto kill; 1938 } 1939 } else /* mle->type == DLM_MLE_MIGRATION */ { 1940 /* should only be getting an assert from new master */ 1941 if (assert->node_idx != mle->new_master) { 1942 mlog(ML_ERROR, "got assert from %u, but " 1943 "new master is %u, and old master " 1944 "was %u (%.*s)\n", 1945 assert->node_idx, mle->new_master, 1946 mle->master, namelen, name); 1947 goto kill; 1948 } 1949 1950 } 1951 ok: 1952 spin_unlock(&res->spinlock); 1953 } 1954 spin_unlock(&dlm->spinlock); 1955 1956 // mlog(0, "woo! got an assert_master from node %u!\n", 1957 // assert->node_idx); 1958 if (mle) { 1959 int extra_ref = 0; 1960 int nn = -1; 1961 int rr, err = 0; 1962 1963 spin_lock(&mle->spinlock); 1964 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1965 extra_ref = 1; 1966 else { 1967 /* MASTER mle: if any bits set in the response map 1968 * then the calling node needs to re-assert to clear 1969 * up nodes that this node contacted */ 1970 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1971 nn+1)) < O2NM_MAX_NODES) { 1972 if (nn != dlm->node_num && nn != assert->node_idx) 1973 master_request = 1; 1974 } 1975 } 1976 mle->master = assert->node_idx; 1977 atomic_set(&mle->woken, 1); 1978 wake_up(&mle->wq); 1979 spin_unlock(&mle->spinlock); 1980 1981 if (res) { 1982 int wake = 0; 1983 spin_lock(&res->spinlock); 1984 if (mle->type == DLM_MLE_MIGRATION) { 1985 mlog(0, "finishing off migration of lockres %.*s, " 1986 "from %u to %u\n", 1987 res->lockname.len, res->lockname.name, 1988 dlm->node_num, mle->new_master); 1989 res->state &= ~DLM_LOCK_RES_MIGRATING; 1990 wake = 1; 1991 dlm_change_lockres_owner(dlm, res, mle->new_master); 1992 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1993 } else { 1994 dlm_change_lockres_owner(dlm, res, mle->master); 1995 } 1996 spin_unlock(&res->spinlock); 1997 have_lockres_ref = 1; 1998 if (wake) 1999 wake_up(&res->wq); 2000 } 2001 2002 /* master is known, detach if not already detached. 2003 * ensures that only one assert_master call will happen 2004 * on this mle. */ 2005 spin_lock(&dlm->spinlock); 2006 spin_lock(&dlm->master_lock); 2007 2008 rr = atomic_read(&mle->mle_refs.refcount); 2009 if (mle->inuse > 0) { 2010 if (extra_ref && rr < 3) 2011 err = 1; 2012 else if (!extra_ref && rr < 2) 2013 err = 1; 2014 } else { 2015 if (extra_ref && rr < 2) 2016 err = 1; 2017 else if (!extra_ref && rr < 1) 2018 err = 1; 2019 } 2020 if (err) { 2021 mlog(ML_ERROR, "%s:%.*s: got assert master from %u " 2022 "that will mess up this node, refs=%d, extra=%d, " 2023 "inuse=%d\n", dlm->name, namelen, name, 2024 assert->node_idx, rr, extra_ref, mle->inuse); 2025 dlm_print_one_mle(mle); 2026 } 2027 list_del_init(&mle->list); 2028 __dlm_mle_detach_hb_events(dlm, mle); 2029 __dlm_put_mle(mle); 2030 if (extra_ref) { 2031 /* the assert master message now balances the extra 2032 * ref given by the master / migration request message. 2033 * if this is the last put, it will be removed 2034 * from the list. */ 2035 __dlm_put_mle(mle); 2036 } 2037 spin_unlock(&dlm->master_lock); 2038 spin_unlock(&dlm->spinlock); 2039 } else if (res) { 2040 if (res->owner != assert->node_idx) { 2041 mlog(0, "assert_master from %u, but current " 2042 "owner is %u (%.*s), no mle\n", assert->node_idx, 2043 res->owner, namelen, name); 2044 } 2045 } 2046 2047 done: 2048 ret = 0; 2049 if (res) { 2050 spin_lock(&res->spinlock); 2051 res->state |= DLM_LOCK_RES_SETREF_INPROG; 2052 spin_unlock(&res->spinlock); 2053 *ret_data = (void *)res; 2054 } 2055 dlm_put(dlm); 2056 if (master_request) { 2057 mlog(0, "need to tell master to reassert\n"); 2058 /* positive. negative would shoot down the node. */ 2059 ret |= DLM_ASSERT_RESPONSE_REASSERT; 2060 if (!have_lockres_ref) { 2061 mlog(ML_ERROR, "strange, got assert from %u, MASTER " 2062 "mle present here for %s:%.*s, but no lockres!\n", 2063 assert->node_idx, dlm->name, namelen, name); 2064 } 2065 } 2066 if (have_lockres_ref) { 2067 /* let the master know we have a reference to the lockres */ 2068 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; 2069 mlog(0, "%s:%.*s: got assert from %u, need a ref\n", 2070 dlm->name, namelen, name, assert->node_idx); 2071 } 2072 return ret; 2073 2074 kill: 2075 /* kill the caller! */ 2076 mlog(ML_ERROR, "Bad message received from another node. Dumping state " 2077 "and killing the other node now! This node is OK and can continue.\n"); 2078 __dlm_print_one_lock_resource(res); 2079 spin_unlock(&res->spinlock); 2080 spin_unlock(&dlm->spinlock); 2081 *ret_data = (void *)res; 2082 dlm_put(dlm); 2083 return -EINVAL; 2084 } 2085 2086 void dlm_assert_master_post_handler(int status, void *data, void *ret_data) 2087 { 2088 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; 2089 2090 if (ret_data) { 2091 spin_lock(&res->spinlock); 2092 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 2093 spin_unlock(&res->spinlock); 2094 wake_up(&res->wq); 2095 dlm_lockres_put(res); 2096 } 2097 return; 2098 } 2099 2100 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 2101 struct dlm_lock_resource *res, 2102 int ignore_higher, u8 request_from, u32 flags) 2103 { 2104 struct dlm_work_item *item; 2105 item = kzalloc(sizeof(*item), GFP_NOFS); 2106 if (!item) 2107 return -ENOMEM; 2108 2109 2110 /* queue up work for dlm_assert_master_worker */ 2111 dlm_grab(dlm); /* get an extra ref for the work item */ 2112 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2113 item->u.am.lockres = res; /* already have a ref */ 2114 /* can optionally ignore node numbers higher than this node */ 2115 item->u.am.ignore_higher = ignore_higher; 2116 item->u.am.request_from = request_from; 2117 item->u.am.flags = flags; 2118 2119 if (ignore_higher) 2120 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2121 res->lockname.name); 2122 2123 spin_lock(&dlm->work_lock); 2124 list_add_tail(&item->list, &dlm->work_list); 2125 spin_unlock(&dlm->work_lock); 2126 2127 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2128 return 0; 2129 } 2130 2131 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) 2132 { 2133 struct dlm_ctxt *dlm = data; 2134 int ret = 0; 2135 struct dlm_lock_resource *res; 2136 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2137 int ignore_higher; 2138 int bit; 2139 u8 request_from; 2140 u32 flags; 2141 2142 dlm = item->dlm; 2143 res = item->u.am.lockres; 2144 ignore_higher = item->u.am.ignore_higher; 2145 request_from = item->u.am.request_from; 2146 flags = item->u.am.flags; 2147 2148 spin_lock(&dlm->spinlock); 2149 memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); 2150 spin_unlock(&dlm->spinlock); 2151 2152 clear_bit(dlm->node_num, nodemap); 2153 if (ignore_higher) { 2154 /* if is this just to clear up mles for nodes below 2155 * this node, do not send the message to the original 2156 * caller or any node number higher than this */ 2157 clear_bit(request_from, nodemap); 2158 bit = dlm->node_num; 2159 while (1) { 2160 bit = find_next_bit(nodemap, O2NM_MAX_NODES, 2161 bit+1); 2162 if (bit >= O2NM_MAX_NODES) 2163 break; 2164 clear_bit(bit, nodemap); 2165 } 2166 } 2167 2168 /* 2169 * If we're migrating this lock to someone else, we are no 2170 * longer allowed to assert out own mastery. OTOH, we need to 2171 * prevent migration from starting while we're still asserting 2172 * our dominance. The reserved ast delays migration. 2173 */ 2174 spin_lock(&res->spinlock); 2175 if (res->state & DLM_LOCK_RES_MIGRATING) { 2176 mlog(0, "Someone asked us to assert mastery, but we're " 2177 "in the middle of migration. Skipping assert, " 2178 "the new master will handle that.\n"); 2179 spin_unlock(&res->spinlock); 2180 goto put; 2181 } else 2182 __dlm_lockres_reserve_ast(res); 2183 spin_unlock(&res->spinlock); 2184 2185 /* this call now finishes out the nodemap 2186 * even if one or more nodes die */ 2187 mlog(0, "worker about to master %.*s here, this=%u\n", 2188 res->lockname.len, res->lockname.name, dlm->node_num); 2189 ret = dlm_do_assert_master(dlm, res, nodemap, flags); 2190 if (ret < 0) { 2191 /* no need to restart, we are done */ 2192 if (!dlm_is_host_down(ret)) 2193 mlog_errno(ret); 2194 } 2195 2196 /* Ok, we've asserted ourselves. Let's let migration start. */ 2197 dlm_lockres_release_ast(dlm, res); 2198 2199 put: 2200 dlm_lockres_put(res); 2201 2202 mlog(0, "finished with dlm_assert_master_worker\n"); 2203 } 2204 2205 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. 2206 * We cannot wait for node recovery to complete to begin mastering this 2207 * lockres because this lockres is used to kick off recovery! ;-) 2208 * So, do a pre-check on all living nodes to see if any of those nodes 2209 * think that $RECOVERY is currently mastered by a dead node. If so, 2210 * we wait a short time to allow that node to get notified by its own 2211 * heartbeat stack, then check again. All $RECOVERY lock resources 2212 * mastered by dead nodes are purged when the hearbeat callback is 2213 * fired, so we can know for sure that it is safe to continue once 2214 * the node returns a live node or no node. */ 2215 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2216 struct dlm_lock_resource *res) 2217 { 2218 struct dlm_node_iter iter; 2219 int nodenum; 2220 int ret = 0; 2221 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; 2222 2223 spin_lock(&dlm->spinlock); 2224 dlm_node_iter_init(dlm->domain_map, &iter); 2225 spin_unlock(&dlm->spinlock); 2226 2227 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2228 /* do not send to self */ 2229 if (nodenum == dlm->node_num) 2230 continue; 2231 ret = dlm_do_master_requery(dlm, res, nodenum, &master); 2232 if (ret < 0) { 2233 mlog_errno(ret); 2234 if (!dlm_is_host_down(ret)) 2235 BUG(); 2236 /* host is down, so answer for that node would be 2237 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 2238 ret = 0; 2239 } 2240 2241 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { 2242 /* check to see if this master is in the recovery map */ 2243 spin_lock(&dlm->spinlock); 2244 if (test_bit(master, dlm->recovery_map)) { 2245 mlog(ML_NOTICE, "%s: node %u has not seen " 2246 "node %u go down yet, and thinks the " 2247 "dead node is mastering the recovery " 2248 "lock. must wait.\n", dlm->name, 2249 nodenum, master); 2250 ret = -EAGAIN; 2251 } 2252 spin_unlock(&dlm->spinlock); 2253 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2254 master); 2255 break; 2256 } 2257 } 2258 return ret; 2259 } 2260 2261 /* 2262 * DLM_DEREF_LOCKRES_MSG 2263 */ 2264 2265 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2266 { 2267 struct dlm_deref_lockres deref; 2268 int ret = 0, r; 2269 const char *lockname; 2270 unsigned int namelen; 2271 2272 lockname = res->lockname.name; 2273 namelen = res->lockname.len; 2274 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2275 2276 mlog(0, "%s:%.*s: sending deref to %d\n", 2277 dlm->name, namelen, lockname, res->owner); 2278 memset(&deref, 0, sizeof(deref)); 2279 deref.node_idx = dlm->node_num; 2280 deref.namelen = namelen; 2281 memcpy(deref.name, lockname, namelen); 2282 2283 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2284 &deref, sizeof(deref), res->owner, &r); 2285 if (ret < 0) 2286 mlog_errno(ret); 2287 else if (r < 0) { 2288 /* BAD. other node says I did not have a ref. */ 2289 mlog(ML_ERROR,"while dropping ref on %s:%.*s " 2290 "(master=%u) got %d.\n", dlm->name, namelen, 2291 lockname, res->owner, r); 2292 dlm_print_one_lock_resource(res); 2293 BUG(); 2294 } 2295 return ret; 2296 } 2297 2298 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 2299 void **ret_data) 2300 { 2301 struct dlm_ctxt *dlm = data; 2302 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; 2303 struct dlm_lock_resource *res = NULL; 2304 char *name; 2305 unsigned int namelen; 2306 int ret = -EINVAL; 2307 u8 node; 2308 unsigned int hash; 2309 struct dlm_work_item *item; 2310 int cleared = 0; 2311 int dispatch = 0; 2312 2313 if (!dlm_grab(dlm)) 2314 return 0; 2315 2316 name = deref->name; 2317 namelen = deref->namelen; 2318 node = deref->node_idx; 2319 2320 if (namelen > DLM_LOCKID_NAME_MAX) { 2321 mlog(ML_ERROR, "Invalid name length!"); 2322 goto done; 2323 } 2324 if (deref->node_idx >= O2NM_MAX_NODES) { 2325 mlog(ML_ERROR, "Invalid node number: %u\n", node); 2326 goto done; 2327 } 2328 2329 hash = dlm_lockid_hash(name, namelen); 2330 2331 spin_lock(&dlm->spinlock); 2332 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); 2333 if (!res) { 2334 spin_unlock(&dlm->spinlock); 2335 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", 2336 dlm->name, namelen, name); 2337 goto done; 2338 } 2339 spin_unlock(&dlm->spinlock); 2340 2341 spin_lock(&res->spinlock); 2342 if (res->state & DLM_LOCK_RES_SETREF_INPROG) 2343 dispatch = 1; 2344 else { 2345 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2346 if (test_bit(node, res->refmap)) { 2347 dlm_lockres_clear_refmap_bit(node, res); 2348 cleared = 1; 2349 } 2350 } 2351 spin_unlock(&res->spinlock); 2352 2353 if (!dispatch) { 2354 if (cleared) 2355 dlm_lockres_calc_usage(dlm, res); 2356 else { 2357 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2358 "but it is already dropped!\n", dlm->name, 2359 res->lockname.len, res->lockname.name, node); 2360 dlm_print_one_lock_resource(res); 2361 } 2362 ret = 0; 2363 goto done; 2364 } 2365 2366 item = kzalloc(sizeof(*item), GFP_NOFS); 2367 if (!item) { 2368 ret = -ENOMEM; 2369 mlog_errno(ret); 2370 goto done; 2371 } 2372 2373 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); 2374 item->u.dl.deref_res = res; 2375 item->u.dl.deref_node = node; 2376 2377 spin_lock(&dlm->work_lock); 2378 list_add_tail(&item->list, &dlm->work_list); 2379 spin_unlock(&dlm->work_lock); 2380 2381 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2382 return 0; 2383 2384 done: 2385 if (res) 2386 dlm_lockres_put(res); 2387 dlm_put(dlm); 2388 2389 return ret; 2390 } 2391 2392 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) 2393 { 2394 struct dlm_ctxt *dlm; 2395 struct dlm_lock_resource *res; 2396 u8 node; 2397 u8 cleared = 0; 2398 2399 dlm = item->dlm; 2400 res = item->u.dl.deref_res; 2401 node = item->u.dl.deref_node; 2402 2403 spin_lock(&res->spinlock); 2404 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2405 if (test_bit(node, res->refmap)) { 2406 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2407 dlm_lockres_clear_refmap_bit(node, res); 2408 cleared = 1; 2409 } 2410 spin_unlock(&res->spinlock); 2411 2412 if (cleared) { 2413 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", 2414 dlm->name, res->lockname.len, res->lockname.name, node); 2415 dlm_lockres_calc_usage(dlm, res); 2416 } else { 2417 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2418 "but it is already dropped!\n", dlm->name, 2419 res->lockname.len, res->lockname.name, node); 2420 dlm_print_one_lock_resource(res); 2421 } 2422 2423 dlm_lockres_put(res); 2424 } 2425 2426 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0 2427 * if not. If 0, numlocks is set to the number of locks in the lockres. 2428 */ 2429 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, 2430 struct dlm_lock_resource *res, 2431 int *numlocks) 2432 { 2433 int ret; 2434 int i; 2435 int count = 0; 2436 struct list_head *queue; 2437 struct dlm_lock *lock; 2438 2439 assert_spin_locked(&res->spinlock); 2440 2441 ret = -EINVAL; 2442 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 2443 mlog(0, "cannot migrate lockres with unknown owner!\n"); 2444 goto leave; 2445 } 2446 2447 if (res->owner != dlm->node_num) { 2448 mlog(0, "cannot migrate lockres this node doesn't own!\n"); 2449 goto leave; 2450 } 2451 2452 ret = 0; 2453 queue = &res->granted; 2454 for (i = 0; i < 3; i++) { 2455 list_for_each_entry(lock, queue, list) { 2456 ++count; 2457 if (lock->ml.node == dlm->node_num) { 2458 mlog(0, "found a lock owned by this node still " 2459 "on the %s queue! will not migrate this " 2460 "lockres\n", (i == 0 ? "granted" : 2461 (i == 1 ? "converting" : 2462 "blocked"))); 2463 ret = -ENOTEMPTY; 2464 goto leave; 2465 } 2466 } 2467 queue++; 2468 } 2469 2470 *numlocks = count; 2471 mlog(0, "migrateable lockres having %d locks\n", *numlocks); 2472 2473 leave: 2474 return ret; 2475 } 2476 2477 /* 2478 * DLM_MIGRATE_LOCKRES 2479 */ 2480 2481 2482 static int dlm_migrate_lockres(struct dlm_ctxt *dlm, 2483 struct dlm_lock_resource *res, 2484 u8 target) 2485 { 2486 struct dlm_master_list_entry *mle = NULL; 2487 struct dlm_master_list_entry *oldmle = NULL; 2488 struct dlm_migratable_lockres *mres = NULL; 2489 int ret = 0; 2490 const char *name; 2491 unsigned int namelen; 2492 int mle_added = 0; 2493 int numlocks; 2494 int wake = 0; 2495 2496 if (!dlm_grab(dlm)) 2497 return -EINVAL; 2498 2499 name = res->lockname.name; 2500 namelen = res->lockname.len; 2501 2502 mlog(0, "migrating %.*s to %u\n", namelen, name, target); 2503 2504 /* 2505 * ensure this lockres is a proper candidate for migration 2506 */ 2507 spin_lock(&res->spinlock); 2508 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2509 if (ret < 0) { 2510 spin_unlock(&res->spinlock); 2511 goto leave; 2512 } 2513 spin_unlock(&res->spinlock); 2514 2515 /* no work to do */ 2516 if (numlocks == 0) { 2517 mlog(0, "no locks were found on this lockres! done!\n"); 2518 goto leave; 2519 } 2520 2521 /* 2522 * preallocate up front 2523 * if this fails, abort 2524 */ 2525 2526 ret = -ENOMEM; 2527 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); 2528 if (!mres) { 2529 mlog_errno(ret); 2530 goto leave; 2531 } 2532 2533 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, 2534 GFP_NOFS); 2535 if (!mle) { 2536 mlog_errno(ret); 2537 goto leave; 2538 } 2539 ret = 0; 2540 2541 /* 2542 * find a node to migrate the lockres to 2543 */ 2544 2545 mlog(0, "picking a migration node\n"); 2546 spin_lock(&dlm->spinlock); 2547 /* pick a new node */ 2548 if (!test_bit(target, dlm->domain_map) || 2549 target >= O2NM_MAX_NODES) { 2550 target = dlm_pick_migration_target(dlm, res); 2551 } 2552 mlog(0, "node %u chosen for migration\n", target); 2553 2554 if (target >= O2NM_MAX_NODES || 2555 !test_bit(target, dlm->domain_map)) { 2556 /* target chosen is not alive */ 2557 ret = -EINVAL; 2558 } 2559 2560 if (ret) { 2561 spin_unlock(&dlm->spinlock); 2562 goto fail; 2563 } 2564 2565 mlog(0, "continuing with target = %u\n", target); 2566 2567 /* 2568 * clear any existing master requests and 2569 * add the migration mle to the list 2570 */ 2571 spin_lock(&dlm->master_lock); 2572 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, 2573 namelen, target, dlm->node_num); 2574 spin_unlock(&dlm->master_lock); 2575 spin_unlock(&dlm->spinlock); 2576 2577 if (ret == -EEXIST) { 2578 mlog(0, "another process is already migrating it\n"); 2579 goto fail; 2580 } 2581 mle_added = 1; 2582 2583 /* 2584 * set the MIGRATING flag and flush asts 2585 * if we fail after this we need to re-dirty the lockres 2586 */ 2587 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { 2588 mlog(ML_ERROR, "tried to migrate %.*s to %u, but " 2589 "the target went down.\n", res->lockname.len, 2590 res->lockname.name, target); 2591 spin_lock(&res->spinlock); 2592 res->state &= ~DLM_LOCK_RES_MIGRATING; 2593 wake = 1; 2594 spin_unlock(&res->spinlock); 2595 ret = -EINVAL; 2596 } 2597 2598 fail: 2599 if (oldmle) { 2600 /* master is known, detach if not already detached */ 2601 dlm_mle_detach_hb_events(dlm, oldmle); 2602 dlm_put_mle(oldmle); 2603 } 2604 2605 if (ret < 0) { 2606 if (mle_added) { 2607 dlm_mle_detach_hb_events(dlm, mle); 2608 dlm_put_mle(mle); 2609 } else if (mle) { 2610 kmem_cache_free(dlm_mle_cache, mle); 2611 } 2612 goto leave; 2613 } 2614 2615 /* 2616 * at this point, we have a migration target, an mle 2617 * in the master list, and the MIGRATING flag set on 2618 * the lockres 2619 */ 2620 2621 /* now that remote nodes are spinning on the MIGRATING flag, 2622 * ensure that all assert_master work is flushed. */ 2623 flush_workqueue(dlm->dlm_worker); 2624 2625 /* get an extra reference on the mle. 2626 * otherwise the assert_master from the new 2627 * master will destroy this. 2628 * also, make sure that all callers of dlm_get_mle 2629 * take both dlm->spinlock and dlm->master_lock */ 2630 spin_lock(&dlm->spinlock); 2631 spin_lock(&dlm->master_lock); 2632 dlm_get_mle_inuse(mle); 2633 spin_unlock(&dlm->master_lock); 2634 spin_unlock(&dlm->spinlock); 2635 2636 /* notify new node and send all lock state */ 2637 /* call send_one_lockres with migration flag. 2638 * this serves as notice to the target node that a 2639 * migration is starting. */ 2640 ret = dlm_send_one_lockres(dlm, res, mres, target, 2641 DLM_MRES_MIGRATION); 2642 2643 if (ret < 0) { 2644 mlog(0, "migration to node %u failed with %d\n", 2645 target, ret); 2646 /* migration failed, detach and clean up mle */ 2647 dlm_mle_detach_hb_events(dlm, mle); 2648 dlm_put_mle(mle); 2649 dlm_put_mle_inuse(mle); 2650 spin_lock(&res->spinlock); 2651 res->state &= ~DLM_LOCK_RES_MIGRATING; 2652 wake = 1; 2653 spin_unlock(&res->spinlock); 2654 goto leave; 2655 } 2656 2657 /* at this point, the target sends a message to all nodes, 2658 * (using dlm_do_migrate_request). this node is skipped since 2659 * we had to put an mle in the list to begin the process. this 2660 * node now waits for target to do an assert master. this node 2661 * will be the last one notified, ensuring that the migration 2662 * is complete everywhere. if the target dies while this is 2663 * going on, some nodes could potentially see the target as the 2664 * master, so it is important that my recovery finds the migration 2665 * mle and sets the master to UNKNONWN. */ 2666 2667 2668 /* wait for new node to assert master */ 2669 while (1) { 2670 ret = wait_event_interruptible_timeout(mle->wq, 2671 (atomic_read(&mle->woken) == 1), 2672 msecs_to_jiffies(5000)); 2673 2674 if (ret >= 0) { 2675 if (atomic_read(&mle->woken) == 1 || 2676 res->owner == target) 2677 break; 2678 2679 mlog(0, "%s:%.*s: timed out during migration\n", 2680 dlm->name, res->lockname.len, res->lockname.name); 2681 /* avoid hang during shutdown when migrating lockres 2682 * to a node which also goes down */ 2683 if (dlm_is_node_dead(dlm, target)) { 2684 mlog(0, "%s:%.*s: expected migration " 2685 "target %u is no longer up, restarting\n", 2686 dlm->name, res->lockname.len, 2687 res->lockname.name, target); 2688 ret = -EINVAL; 2689 /* migration failed, detach and clean up mle */ 2690 dlm_mle_detach_hb_events(dlm, mle); 2691 dlm_put_mle(mle); 2692 dlm_put_mle_inuse(mle); 2693 spin_lock(&res->spinlock); 2694 res->state &= ~DLM_LOCK_RES_MIGRATING; 2695 wake = 1; 2696 spin_unlock(&res->spinlock); 2697 goto leave; 2698 } 2699 } else 2700 mlog(0, "%s:%.*s: caught signal during migration\n", 2701 dlm->name, res->lockname.len, res->lockname.name); 2702 } 2703 2704 /* all done, set the owner, clear the flag */ 2705 spin_lock(&res->spinlock); 2706 dlm_set_lockres_owner(dlm, res, target); 2707 res->state &= ~DLM_LOCK_RES_MIGRATING; 2708 dlm_remove_nonlocal_locks(dlm, res); 2709 spin_unlock(&res->spinlock); 2710 wake_up(&res->wq); 2711 2712 /* master is known, detach if not already detached */ 2713 dlm_mle_detach_hb_events(dlm, mle); 2714 dlm_put_mle_inuse(mle); 2715 ret = 0; 2716 2717 dlm_lockres_calc_usage(dlm, res); 2718 2719 leave: 2720 /* re-dirty the lockres if we failed */ 2721 if (ret < 0) 2722 dlm_kick_thread(dlm, res); 2723 2724 /* wake up waiters if the MIGRATING flag got set 2725 * but migration failed */ 2726 if (wake) 2727 wake_up(&res->wq); 2728 2729 /* TODO: cleanup */ 2730 if (mres) 2731 free_page((unsigned long)mres); 2732 2733 dlm_put(dlm); 2734 2735 mlog(0, "returning %d\n", ret); 2736 return ret; 2737 } 2738 2739 #define DLM_MIGRATION_RETRY_MS 100 2740 2741 /* Should be called only after beginning the domain leave process. 2742 * There should not be any remaining locks on nonlocal lock resources, 2743 * and there should be no local locks left on locally mastered resources. 2744 * 2745 * Called with the dlm spinlock held, may drop it to do migration, but 2746 * will re-acquire before exit. 2747 * 2748 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */ 2749 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2750 { 2751 int ret; 2752 int lock_dropped = 0; 2753 int numlocks; 2754 2755 spin_lock(&res->spinlock); 2756 if (res->owner != dlm->node_num) { 2757 if (!__dlm_lockres_unused(res)) { 2758 mlog(ML_ERROR, "%s:%.*s: this node is not master, " 2759 "trying to free this but locks remain\n", 2760 dlm->name, res->lockname.len, res->lockname.name); 2761 } 2762 spin_unlock(&res->spinlock); 2763 goto leave; 2764 } 2765 2766 /* No need to migrate a lockres having no locks */ 2767 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2768 if (ret >= 0 && numlocks == 0) { 2769 spin_unlock(&res->spinlock); 2770 goto leave; 2771 } 2772 spin_unlock(&res->spinlock); 2773 2774 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ 2775 spin_unlock(&dlm->spinlock); 2776 lock_dropped = 1; 2777 while (1) { 2778 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES); 2779 if (ret >= 0) 2780 break; 2781 if (ret == -ENOTEMPTY) { 2782 mlog(ML_ERROR, "lockres %.*s still has local locks!\n", 2783 res->lockname.len, res->lockname.name); 2784 BUG(); 2785 } 2786 2787 mlog(0, "lockres %.*s: migrate failed, " 2788 "retrying\n", res->lockname.len, 2789 res->lockname.name); 2790 msleep(DLM_MIGRATION_RETRY_MS); 2791 } 2792 spin_lock(&dlm->spinlock); 2793 leave: 2794 return lock_dropped; 2795 } 2796 2797 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2798 { 2799 int ret; 2800 spin_lock(&dlm->ast_lock); 2801 spin_lock(&lock->spinlock); 2802 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); 2803 spin_unlock(&lock->spinlock); 2804 spin_unlock(&dlm->ast_lock); 2805 return ret; 2806 } 2807 2808 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, 2809 struct dlm_lock_resource *res, 2810 u8 mig_target) 2811 { 2812 int can_proceed; 2813 spin_lock(&res->spinlock); 2814 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2815 spin_unlock(&res->spinlock); 2816 2817 /* target has died, so make the caller break out of the 2818 * wait_event, but caller must recheck the domain_map */ 2819 spin_lock(&dlm->spinlock); 2820 if (!test_bit(mig_target, dlm->domain_map)) 2821 can_proceed = 1; 2822 spin_unlock(&dlm->spinlock); 2823 return can_proceed; 2824 } 2825 2826 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, 2827 struct dlm_lock_resource *res) 2828 { 2829 int ret; 2830 spin_lock(&res->spinlock); 2831 ret = !!(res->state & DLM_LOCK_RES_DIRTY); 2832 spin_unlock(&res->spinlock); 2833 return ret; 2834 } 2835 2836 2837 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 2838 struct dlm_lock_resource *res, 2839 u8 target) 2840 { 2841 int ret = 0; 2842 2843 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", 2844 res->lockname.len, res->lockname.name, dlm->node_num, 2845 target); 2846 /* need to set MIGRATING flag on lockres. this is done by 2847 * ensuring that all asts have been flushed for this lockres. */ 2848 spin_lock(&res->spinlock); 2849 BUG_ON(res->migration_pending); 2850 res->migration_pending = 1; 2851 /* strategy is to reserve an extra ast then release 2852 * it below, letting the release do all of the work */ 2853 __dlm_lockres_reserve_ast(res); 2854 spin_unlock(&res->spinlock); 2855 2856 /* now flush all the pending asts */ 2857 dlm_kick_thread(dlm, res); 2858 /* before waiting on DIRTY, block processes which may 2859 * try to dirty the lockres before MIGRATING is set */ 2860 spin_lock(&res->spinlock); 2861 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); 2862 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; 2863 spin_unlock(&res->spinlock); 2864 /* now wait on any pending asts and the DIRTY state */ 2865 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); 2866 dlm_lockres_release_ast(dlm, res); 2867 2868 mlog(0, "about to wait on migration_wq, dirty=%s\n", 2869 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); 2870 /* if the extra ref we just put was the final one, this 2871 * will pass thru immediately. otherwise, we need to wait 2872 * for the last ast to finish. */ 2873 again: 2874 ret = wait_event_interruptible_timeout(dlm->migration_wq, 2875 dlm_migration_can_proceed(dlm, res, target), 2876 msecs_to_jiffies(1000)); 2877 if (ret < 0) { 2878 mlog(0, "woken again: migrating? %s, dead? %s\n", 2879 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2880 test_bit(target, dlm->domain_map) ? "no":"yes"); 2881 } else { 2882 mlog(0, "all is well: migrating? %s, dead? %s\n", 2883 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", 2884 test_bit(target, dlm->domain_map) ? "no":"yes"); 2885 } 2886 if (!dlm_migration_can_proceed(dlm, res, target)) { 2887 mlog(0, "trying again...\n"); 2888 goto again; 2889 } 2890 /* now that we are sure the MIGRATING state is there, drop 2891 * the unneded state which blocked threads trying to DIRTY */ 2892 spin_lock(&res->spinlock); 2893 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); 2894 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2895 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2896 spin_unlock(&res->spinlock); 2897 2898 /* did the target go down or die? */ 2899 spin_lock(&dlm->spinlock); 2900 if (!test_bit(target, dlm->domain_map)) { 2901 mlog(ML_ERROR, "aha. migration target %u just went down\n", 2902 target); 2903 ret = -EHOSTDOWN; 2904 } 2905 spin_unlock(&dlm->spinlock); 2906 2907 /* 2908 * at this point: 2909 * 2910 * o the DLM_LOCK_RES_MIGRATING flag is set 2911 * o there are no pending asts on this lockres 2912 * o all processes trying to reserve an ast on this 2913 * lockres must wait for the MIGRATING flag to clear 2914 */ 2915 return ret; 2916 } 2917 2918 /* last step in the migration process. 2919 * original master calls this to free all of the dlm_lock 2920 * structures that used to be for other nodes. */ 2921 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 2922 struct dlm_lock_resource *res) 2923 { 2924 struct list_head *queue = &res->granted; 2925 int i, bit; 2926 struct dlm_lock *lock, *next; 2927 2928 assert_spin_locked(&res->spinlock); 2929 2930 BUG_ON(res->owner == dlm->node_num); 2931 2932 for (i=0; i<3; i++) { 2933 list_for_each_entry_safe(lock, next, queue, list) { 2934 if (lock->ml.node != dlm->node_num) { 2935 mlog(0, "putting lock for node %u\n", 2936 lock->ml.node); 2937 /* be extra careful */ 2938 BUG_ON(!list_empty(&lock->ast_list)); 2939 BUG_ON(!list_empty(&lock->bast_list)); 2940 BUG_ON(lock->ast_pending); 2941 BUG_ON(lock->bast_pending); 2942 dlm_lockres_clear_refmap_bit(lock->ml.node, res); 2943 list_del_init(&lock->list); 2944 dlm_lock_put(lock); 2945 /* In a normal unlock, we would have added a 2946 * DLM_UNLOCK_FREE_LOCK action. Force it. */ 2947 dlm_lock_put(lock); 2948 } 2949 } 2950 queue++; 2951 } 2952 bit = 0; 2953 while (1) { 2954 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); 2955 if (bit >= O2NM_MAX_NODES) 2956 break; 2957 /* do not clear the local node reference, if there is a 2958 * process holding this, let it drop the ref itself */ 2959 if (bit != dlm->node_num) { 2960 mlog(0, "%s:%.*s: node %u had a ref to this " 2961 "migrating lockres, clearing\n", dlm->name, 2962 res->lockname.len, res->lockname.name, bit); 2963 dlm_lockres_clear_refmap_bit(bit, res); 2964 } 2965 bit++; 2966 } 2967 } 2968 2969 /* for now this is not too intelligent. we will 2970 * need stats to make this do the right thing. 2971 * this just finds the first lock on one of the 2972 * queues and uses that node as the target. */ 2973 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 2974 struct dlm_lock_resource *res) 2975 { 2976 int i; 2977 struct list_head *queue = &res->granted; 2978 struct dlm_lock *lock; 2979 int nodenum; 2980 2981 assert_spin_locked(&dlm->spinlock); 2982 2983 spin_lock(&res->spinlock); 2984 for (i=0; i<3; i++) { 2985 list_for_each_entry(lock, queue, list) { 2986 /* up to the caller to make sure this node 2987 * is alive */ 2988 if (lock->ml.node != dlm->node_num) { 2989 spin_unlock(&res->spinlock); 2990 return lock->ml.node; 2991 } 2992 } 2993 queue++; 2994 } 2995 spin_unlock(&res->spinlock); 2996 mlog(0, "have not found a suitable target yet! checking domain map\n"); 2997 2998 /* ok now we're getting desperate. pick anyone alive. */ 2999 nodenum = -1; 3000 while (1) { 3001 nodenum = find_next_bit(dlm->domain_map, 3002 O2NM_MAX_NODES, nodenum+1); 3003 mlog(0, "found %d in domain map\n", nodenum); 3004 if (nodenum >= O2NM_MAX_NODES) 3005 break; 3006 if (nodenum != dlm->node_num) { 3007 mlog(0, "picking %d\n", nodenum); 3008 return nodenum; 3009 } 3010 } 3011 3012 mlog(0, "giving up. no master to migrate to\n"); 3013 return DLM_LOCK_RES_OWNER_UNKNOWN; 3014 } 3015 3016 3017 3018 /* this is called by the new master once all lockres 3019 * data has been received */ 3020 static int dlm_do_migrate_request(struct dlm_ctxt *dlm, 3021 struct dlm_lock_resource *res, 3022 u8 master, u8 new_master, 3023 struct dlm_node_iter *iter) 3024 { 3025 struct dlm_migrate_request migrate; 3026 int ret, status = 0; 3027 int nodenum; 3028 3029 memset(&migrate, 0, sizeof(migrate)); 3030 migrate.namelen = res->lockname.len; 3031 memcpy(migrate.name, res->lockname.name, migrate.namelen); 3032 migrate.new_master = new_master; 3033 migrate.master = master; 3034 3035 ret = 0; 3036 3037 /* send message to all nodes, except the master and myself */ 3038 while ((nodenum = dlm_node_iter_next(iter)) >= 0) { 3039 if (nodenum == master || 3040 nodenum == new_master) 3041 continue; 3042 3043 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, 3044 &migrate, sizeof(migrate), nodenum, 3045 &status); 3046 if (ret < 0) 3047 mlog_errno(ret); 3048 else if (status < 0) { 3049 mlog(0, "migrate request (node %u) returned %d!\n", 3050 nodenum, status); 3051 ret = status; 3052 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { 3053 /* during the migration request we short-circuited 3054 * the mastery of the lockres. make sure we have 3055 * a mastery ref for nodenum */ 3056 mlog(0, "%s:%.*s: need ref for node %u\n", 3057 dlm->name, res->lockname.len, res->lockname.name, 3058 nodenum); 3059 spin_lock(&res->spinlock); 3060 dlm_lockres_set_refmap_bit(nodenum, res); 3061 spin_unlock(&res->spinlock); 3062 } 3063 } 3064 3065 if (ret < 0) 3066 mlog_errno(ret); 3067 3068 mlog(0, "returning ret=%d\n", ret); 3069 return ret; 3070 } 3071 3072 3073 /* if there is an existing mle for this lockres, we now know who the master is. 3074 * (the one who sent us *this* message) we can clear it up right away. 3075 * since the process that put the mle on the list still has a reference to it, 3076 * we can unhash it now, set the master and wake the process. as a result, 3077 * we will have no mle in the list to start with. now we can add an mle for 3078 * the migration and this should be the only one found for those scanning the 3079 * list. */ 3080 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, 3081 void **ret_data) 3082 { 3083 struct dlm_ctxt *dlm = data; 3084 struct dlm_lock_resource *res = NULL; 3085 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; 3086 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; 3087 const char *name; 3088 unsigned int namelen, hash; 3089 int ret = 0; 3090 3091 if (!dlm_grab(dlm)) 3092 return -EINVAL; 3093 3094 name = migrate->name; 3095 namelen = migrate->namelen; 3096 hash = dlm_lockid_hash(name, namelen); 3097 3098 /* preallocate.. if this fails, abort */ 3099 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, 3100 GFP_NOFS); 3101 3102 if (!mle) { 3103 ret = -ENOMEM; 3104 goto leave; 3105 } 3106 3107 /* check for pre-existing lock */ 3108 spin_lock(&dlm->spinlock); 3109 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 3110 spin_lock(&dlm->master_lock); 3111 3112 if (res) { 3113 spin_lock(&res->spinlock); 3114 if (res->state & DLM_LOCK_RES_RECOVERING) { 3115 /* if all is working ok, this can only mean that we got 3116 * a migrate request from a node that we now see as 3117 * dead. what can we do here? drop it to the floor? */ 3118 spin_unlock(&res->spinlock); 3119 mlog(ML_ERROR, "Got a migrate request, but the " 3120 "lockres is marked as recovering!"); 3121 kmem_cache_free(dlm_mle_cache, mle); 3122 ret = -EINVAL; /* need a better solution */ 3123 goto unlock; 3124 } 3125 res->state |= DLM_LOCK_RES_MIGRATING; 3126 spin_unlock(&res->spinlock); 3127 } 3128 3129 /* ignore status. only nonzero status would BUG. */ 3130 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 3131 name, namelen, 3132 migrate->new_master, 3133 migrate->master); 3134 3135 unlock: 3136 spin_unlock(&dlm->master_lock); 3137 spin_unlock(&dlm->spinlock); 3138 3139 if (oldmle) { 3140 /* master is known, detach if not already detached */ 3141 dlm_mle_detach_hb_events(dlm, oldmle); 3142 dlm_put_mle(oldmle); 3143 } 3144 3145 if (res) 3146 dlm_lockres_put(res); 3147 leave: 3148 dlm_put(dlm); 3149 return ret; 3150 } 3151 3152 /* must be holding dlm->spinlock and dlm->master_lock 3153 * when adding a migration mle, we can clear any other mles 3154 * in the master list because we know with certainty that 3155 * the master is "master". so we remove any old mle from 3156 * the list after setting it's master field, and then add 3157 * the new migration mle. this way we can hold with the rule 3158 * of having only one mle for a given lock name at all times. */ 3159 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 3160 struct dlm_lock_resource *res, 3161 struct dlm_master_list_entry *mle, 3162 struct dlm_master_list_entry **oldmle, 3163 const char *name, unsigned int namelen, 3164 u8 new_master, u8 master) 3165 { 3166 int found; 3167 int ret = 0; 3168 3169 *oldmle = NULL; 3170 3171 mlog_entry_void(); 3172 3173 assert_spin_locked(&dlm->spinlock); 3174 assert_spin_locked(&dlm->master_lock); 3175 3176 /* caller is responsible for any ref taken here on oldmle */ 3177 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); 3178 if (found) { 3179 struct dlm_master_list_entry *tmp = *oldmle; 3180 spin_lock(&tmp->spinlock); 3181 if (tmp->type == DLM_MLE_MIGRATION) { 3182 if (master == dlm->node_num) { 3183 /* ah another process raced me to it */ 3184 mlog(0, "tried to migrate %.*s, but some " 3185 "process beat me to it\n", 3186 namelen, name); 3187 ret = -EEXIST; 3188 } else { 3189 /* bad. 2 NODES are trying to migrate! */ 3190 mlog(ML_ERROR, "migration error mle: " 3191 "master=%u new_master=%u // request: " 3192 "master=%u new_master=%u // " 3193 "lockres=%.*s\n", 3194 tmp->master, tmp->new_master, 3195 master, new_master, 3196 namelen, name); 3197 BUG(); 3198 } 3199 } else { 3200 /* this is essentially what assert_master does */ 3201 tmp->master = master; 3202 atomic_set(&tmp->woken, 1); 3203 wake_up(&tmp->wq); 3204 /* remove it from the list so that only one 3205 * mle will be found */ 3206 list_del_init(&tmp->list); 3207 /* this was obviously WRONG. mle is uninited here. should be tmp. */ 3208 __dlm_mle_detach_hb_events(dlm, tmp); 3209 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3210 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3211 "telling master to get ref for cleared out mle " 3212 "during migration\n", dlm->name, namelen, name, 3213 master, new_master); 3214 } 3215 spin_unlock(&tmp->spinlock); 3216 } 3217 3218 /* now add a migration mle to the tail of the list */ 3219 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); 3220 mle->new_master = new_master; 3221 /* the new master will be sending an assert master for this. 3222 * at that point we will get the refmap reference */ 3223 mle->master = master; 3224 /* do this for consistency with other mle types */ 3225 set_bit(new_master, mle->maybe_map); 3226 list_add(&mle->list, &dlm->master_list); 3227 3228 return ret; 3229 } 3230 3231 3232 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) 3233 { 3234 struct dlm_master_list_entry *mle, *next; 3235 struct dlm_lock_resource *res; 3236 unsigned int hash; 3237 3238 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); 3239 top: 3240 assert_spin_locked(&dlm->spinlock); 3241 3242 /* clean the master list */ 3243 spin_lock(&dlm->master_lock); 3244 list_for_each_entry_safe(mle, next, &dlm->master_list, list) { 3245 BUG_ON(mle->type != DLM_MLE_BLOCK && 3246 mle->type != DLM_MLE_MASTER && 3247 mle->type != DLM_MLE_MIGRATION); 3248 3249 /* MASTER mles are initiated locally. the waiting 3250 * process will notice the node map change 3251 * shortly. let that happen as normal. */ 3252 if (mle->type == DLM_MLE_MASTER) 3253 continue; 3254 3255 3256 /* BLOCK mles are initiated by other nodes. 3257 * need to clean up if the dead node would have 3258 * been the master. */ 3259 if (mle->type == DLM_MLE_BLOCK) { 3260 int bit; 3261 3262 spin_lock(&mle->spinlock); 3263 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 3264 if (bit != dead_node) { 3265 mlog(0, "mle found, but dead node %u would " 3266 "not have been master\n", dead_node); 3267 spin_unlock(&mle->spinlock); 3268 } else { 3269 /* must drop the refcount by one since the 3270 * assert_master will never arrive. this 3271 * may result in the mle being unlinked and 3272 * freed, but there may still be a process 3273 * waiting in the dlmlock path which is fine. */ 3274 mlog(0, "node %u was expected master\n", 3275 dead_node); 3276 atomic_set(&mle->woken, 1); 3277 spin_unlock(&mle->spinlock); 3278 wake_up(&mle->wq); 3279 /* do not need events any longer, so detach 3280 * from heartbeat */ 3281 __dlm_mle_detach_hb_events(dlm, mle); 3282 __dlm_put_mle(mle); 3283 } 3284 continue; 3285 } 3286 3287 /* everything else is a MIGRATION mle */ 3288 3289 /* the rule for MIGRATION mles is that the master 3290 * becomes UNKNOWN if *either* the original or 3291 * the new master dies. all UNKNOWN lockreses 3292 * are sent to whichever node becomes the recovery 3293 * master. the new master is responsible for 3294 * determining if there is still a master for 3295 * this lockres, or if he needs to take over 3296 * mastery. either way, this node should expect 3297 * another message to resolve this. */ 3298 if (mle->master != dead_node && 3299 mle->new_master != dead_node) 3300 continue; 3301 3302 /* if we have reached this point, this mle needs to 3303 * be removed from the list and freed. */ 3304 3305 /* remove from the list early. NOTE: unlinking 3306 * list_head while in list_for_each_safe */ 3307 __dlm_mle_detach_hb_events(dlm, mle); 3308 spin_lock(&mle->spinlock); 3309 list_del_init(&mle->list); 3310 atomic_set(&mle->woken, 1); 3311 spin_unlock(&mle->spinlock); 3312 wake_up(&mle->wq); 3313 3314 mlog(0, "%s: node %u died during migration from " 3315 "%u to %u!\n", dlm->name, dead_node, 3316 mle->master, mle->new_master); 3317 /* if there is a lockres associated with this 3318 * mle, find it and set its owner to UNKNOWN */ 3319 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len); 3320 res = __dlm_lookup_lockres(dlm, mle->u.name.name, 3321 mle->u.name.len, hash); 3322 if (res) { 3323 /* unfortunately if we hit this rare case, our 3324 * lock ordering is messed. we need to drop 3325 * the master lock so that we can take the 3326 * lockres lock, meaning that we will have to 3327 * restart from the head of list. */ 3328 spin_unlock(&dlm->master_lock); 3329 3330 /* move lockres onto recovery list */ 3331 spin_lock(&res->spinlock); 3332 dlm_set_lockres_owner(dlm, res, 3333 DLM_LOCK_RES_OWNER_UNKNOWN); 3334 dlm_move_lockres_to_recovery_list(dlm, res); 3335 spin_unlock(&res->spinlock); 3336 dlm_lockres_put(res); 3337 3338 /* about to get rid of mle, detach from heartbeat */ 3339 __dlm_mle_detach_hb_events(dlm, mle); 3340 3341 /* dump the mle */ 3342 spin_lock(&dlm->master_lock); 3343 __dlm_put_mle(mle); 3344 spin_unlock(&dlm->master_lock); 3345 3346 /* restart */ 3347 goto top; 3348 } 3349 3350 /* this may be the last reference */ 3351 __dlm_put_mle(mle); 3352 } 3353 spin_unlock(&dlm->master_lock); 3354 } 3355 3356 3357 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 3358 u8 old_master) 3359 { 3360 struct dlm_node_iter iter; 3361 int ret = 0; 3362 3363 spin_lock(&dlm->spinlock); 3364 dlm_node_iter_init(dlm->domain_map, &iter); 3365 clear_bit(old_master, iter.node_map); 3366 clear_bit(dlm->node_num, iter.node_map); 3367 spin_unlock(&dlm->spinlock); 3368 3369 /* ownership of the lockres is changing. account for the 3370 * mastery reference here since old_master will briefly have 3371 * a reference after the migration completes */ 3372 spin_lock(&res->spinlock); 3373 dlm_lockres_set_refmap_bit(old_master, res); 3374 spin_unlock(&res->spinlock); 3375 3376 mlog(0, "now time to do a migrate request to other nodes\n"); 3377 ret = dlm_do_migrate_request(dlm, res, old_master, 3378 dlm->node_num, &iter); 3379 if (ret < 0) { 3380 mlog_errno(ret); 3381 goto leave; 3382 } 3383 3384 mlog(0, "doing assert master of %.*s to all except the original node\n", 3385 res->lockname.len, res->lockname.name); 3386 /* this call now finishes out the nodemap 3387 * even if one or more nodes die */ 3388 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3389 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3390 if (ret < 0) { 3391 /* no longer need to retry. all living nodes contacted. */ 3392 mlog_errno(ret); 3393 ret = 0; 3394 } 3395 3396 memset(iter.node_map, 0, sizeof(iter.node_map)); 3397 set_bit(old_master, iter.node_map); 3398 mlog(0, "doing assert master of %.*s back to %u\n", 3399 res->lockname.len, res->lockname.name, old_master); 3400 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3401 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3402 if (ret < 0) { 3403 mlog(0, "assert master to original master failed " 3404 "with %d.\n", ret); 3405 /* the only nonzero status here would be because of 3406 * a dead original node. we're done. */ 3407 ret = 0; 3408 } 3409 3410 /* all done, set the owner, clear the flag */ 3411 spin_lock(&res->spinlock); 3412 dlm_set_lockres_owner(dlm, res, dlm->node_num); 3413 res->state &= ~DLM_LOCK_RES_MIGRATING; 3414 spin_unlock(&res->spinlock); 3415 /* re-dirty it on the new master */ 3416 dlm_kick_thread(dlm, res); 3417 wake_up(&res->wq); 3418 leave: 3419 return ret; 3420 } 3421 3422 /* 3423 * LOCKRES AST REFCOUNT 3424 * this is integral to migration 3425 */ 3426 3427 /* for future intent to call an ast, reserve one ahead of time. 3428 * this should be called only after waiting on the lockres 3429 * with dlm_wait_on_lockres, and while still holding the 3430 * spinlock after the call. */ 3431 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) 3432 { 3433 assert_spin_locked(&res->spinlock); 3434 if (res->state & DLM_LOCK_RES_MIGRATING) { 3435 __dlm_print_one_lock_resource(res); 3436 } 3437 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3438 3439 atomic_inc(&res->asts_reserved); 3440 } 3441 3442 /* 3443 * used to drop the reserved ast, either because it went unused, 3444 * or because the ast/bast was actually called. 3445 * 3446 * also, if there is a pending migration on this lockres, 3447 * and this was the last pending ast on the lockres, 3448 * atomically set the MIGRATING flag before we drop the lock. 3449 * this is how we ensure that migration can proceed with no 3450 * asts in progress. note that it is ok if the state of the 3451 * queues is such that a lock should be granted in the future 3452 * or that a bast should be fired, because the new master will 3453 * shuffle the lists on this lockres as soon as it is migrated. 3454 */ 3455 void dlm_lockres_release_ast(struct dlm_ctxt *dlm, 3456 struct dlm_lock_resource *res) 3457 { 3458 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) 3459 return; 3460 3461 if (!res->migration_pending) { 3462 spin_unlock(&res->spinlock); 3463 return; 3464 } 3465 3466 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3467 res->migration_pending = 0; 3468 res->state |= DLM_LOCK_RES_MIGRATING; 3469 spin_unlock(&res->spinlock); 3470 wake_up(&res->wq); 3471 wake_up(&dlm->migration_wq); 3472 } 3473