1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* -*- mode: c; c-basic-offset: 8; -*- 3 * vim: noexpandtab sw=8 ts=8 sts=0: 4 * 5 * dlmrecovery.c 6 * 7 * recovery stuff 8 * 9 * Copyright (C) 2004 Oracle. All rights reserved. 10 */ 11 12 13 #include <linux/module.h> 14 #include <linux/fs.h> 15 #include <linux/types.h> 16 #include <linux/slab.h> 17 #include <linux/highmem.h> 18 #include <linux/init.h> 19 #include <linux/sysctl.h> 20 #include <linux/random.h> 21 #include <linux/blkdev.h> 22 #include <linux/socket.h> 23 #include <linux/inet.h> 24 #include <linux/timer.h> 25 #include <linux/kthread.h> 26 #include <linux/delay.h> 27 28 29 #include "../cluster/heartbeat.h" 30 #include "../cluster/nodemanager.h" 31 #include "../cluster/tcp.h" 32 33 #include "dlmapi.h" 34 #include "dlmcommon.h" 35 #include "dlmdomain.h" 36 37 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) 38 #include "../cluster/masklog.h" 39 40 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); 41 42 static int dlm_recovery_thread(void *data); 43 static int dlm_do_recovery(struct dlm_ctxt *dlm); 44 45 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); 46 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); 47 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 48 static int dlm_request_all_locks(struct dlm_ctxt *dlm, 49 u8 request_from, u8 dead_node); 50 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm); 51 52 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); 53 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 54 const char *lockname, int namelen, 55 int total_locks, u64 cookie, 56 u8 flags, u8 master); 57 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 58 struct dlm_migratable_lockres *mres, 59 u8 send_to, 60 struct dlm_lock_resource *res, 61 int total_locks); 62 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 63 struct dlm_lock_resource *res, 64 struct dlm_migratable_lockres *mres); 65 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); 66 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, 67 u8 dead_node, u8 send_to); 68 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); 69 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 70 struct list_head *list, u8 dead_node); 71 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 72 u8 dead_node, u8 new_master); 73 static void dlm_reco_ast(void *astdata); 74 static void dlm_reco_bast(void *astdata, int blocked_type); 75 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); 76 static void dlm_request_all_locks_worker(struct dlm_work_item *item, 77 void *data); 78 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); 79 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 80 struct dlm_lock_resource *res, 81 u8 *real_master); 82 83 static u64 dlm_get_next_mig_cookie(void); 84 85 static DEFINE_SPINLOCK(dlm_reco_state_lock); 86 static DEFINE_SPINLOCK(dlm_mig_cookie_lock); 87 static u64 dlm_mig_cookie = 1; 88 89 static u64 dlm_get_next_mig_cookie(void) 90 { 91 u64 c; 92 spin_lock(&dlm_mig_cookie_lock); 93 c = dlm_mig_cookie; 94 if (dlm_mig_cookie == (~0ULL)) 95 dlm_mig_cookie = 1; 96 else 97 dlm_mig_cookie++; 98 spin_unlock(&dlm_mig_cookie_lock); 99 return c; 100 } 101 102 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, 103 u8 dead_node) 104 { 105 assert_spin_locked(&dlm->spinlock); 106 if (dlm->reco.dead_node != dead_node) 107 mlog(0, "%s: changing dead_node from %u to %u\n", 108 dlm->name, dlm->reco.dead_node, dead_node); 109 dlm->reco.dead_node = dead_node; 110 } 111 112 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, 113 u8 master) 114 { 115 assert_spin_locked(&dlm->spinlock); 116 mlog(0, "%s: changing new_master from %u to %u\n", 117 dlm->name, dlm->reco.new_master, master); 118 dlm->reco.new_master = master; 119 } 120 121 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) 122 { 123 assert_spin_locked(&dlm->spinlock); 124 clear_bit(dlm->reco.dead_node, dlm->recovery_map); 125 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 126 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 127 } 128 129 /* Worker function used during recovery. */ 130 void dlm_dispatch_work(struct work_struct *work) 131 { 132 struct dlm_ctxt *dlm = 133 container_of(work, struct dlm_ctxt, dispatched_work); 134 LIST_HEAD(tmp_list); 135 struct dlm_work_item *item, *next; 136 dlm_workfunc_t *workfunc; 137 int tot=0; 138 139 spin_lock(&dlm->work_lock); 140 list_splice_init(&dlm->work_list, &tmp_list); 141 spin_unlock(&dlm->work_lock); 142 143 list_for_each_entry(item, &tmp_list, list) { 144 tot++; 145 } 146 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); 147 148 list_for_each_entry_safe(item, next, &tmp_list, list) { 149 workfunc = item->func; 150 list_del_init(&item->list); 151 152 /* already have ref on dlm to avoid having 153 * it disappear. just double-check. */ 154 BUG_ON(item->dlm != dlm); 155 156 /* this is allowed to sleep and 157 * call network stuff */ 158 workfunc(item, item->data); 159 160 dlm_put(dlm); 161 kfree(item); 162 } 163 } 164 165 /* 166 * RECOVERY THREAD 167 */ 168 169 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) 170 { 171 /* wake the recovery thread 172 * this will wake the reco thread in one of three places 173 * 1) sleeping with no recovery happening 174 * 2) sleeping with recovery mastered elsewhere 175 * 3) recovery mastered here, waiting on reco data */ 176 177 wake_up(&dlm->dlm_reco_thread_wq); 178 } 179 180 /* Launch the recovery thread */ 181 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) 182 { 183 mlog(0, "starting dlm recovery thread...\n"); 184 185 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, 186 "dlm_reco-%s", dlm->name); 187 if (IS_ERR(dlm->dlm_reco_thread_task)) { 188 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); 189 dlm->dlm_reco_thread_task = NULL; 190 return -EINVAL; 191 } 192 193 return 0; 194 } 195 196 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) 197 { 198 if (dlm->dlm_reco_thread_task) { 199 mlog(0, "waiting for dlm recovery thread to exit\n"); 200 kthread_stop(dlm->dlm_reco_thread_task); 201 dlm->dlm_reco_thread_task = NULL; 202 } 203 } 204 205 206 207 /* 208 * this is lame, but here's how recovery works... 209 * 1) all recovery threads cluster wide will work on recovering 210 * ONE node at a time 211 * 2) negotiate who will take over all the locks for the dead node. 212 * thats right... ALL the locks. 213 * 3) once a new master is chosen, everyone scans all locks 214 * and moves aside those mastered by the dead guy 215 * 4) each of these locks should be locked until recovery is done 216 * 5) the new master collects up all of secondary lock queue info 217 * one lock at a time, forcing each node to communicate back 218 * before continuing 219 * 6) each secondary lock queue responds with the full known lock info 220 * 7) once the new master has run all its locks, it sends a ALLDONE! 221 * message to everyone 222 * 8) upon receiving this message, the secondary queue node unlocks 223 * and responds to the ALLDONE 224 * 9) once the new master gets responses from everyone, he unlocks 225 * everything and recovery for this dead node is done 226 *10) go back to 2) while there are still dead nodes 227 * 228 */ 229 230 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) 231 { 232 struct dlm_reco_node_data *ndata; 233 struct dlm_lock_resource *res; 234 235 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", 236 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 237 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", 238 dlm->reco.dead_node, dlm->reco.new_master); 239 240 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 241 char *st = "unknown"; 242 switch (ndata->state) { 243 case DLM_RECO_NODE_DATA_INIT: 244 st = "init"; 245 break; 246 case DLM_RECO_NODE_DATA_REQUESTING: 247 st = "requesting"; 248 break; 249 case DLM_RECO_NODE_DATA_DEAD: 250 st = "dead"; 251 break; 252 case DLM_RECO_NODE_DATA_RECEIVING: 253 st = "receiving"; 254 break; 255 case DLM_RECO_NODE_DATA_REQUESTED: 256 st = "requested"; 257 break; 258 case DLM_RECO_NODE_DATA_DONE: 259 st = "done"; 260 break; 261 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 262 st = "finalize-sent"; 263 break; 264 default: 265 st = "bad"; 266 break; 267 } 268 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", 269 dlm->name, ndata->node_num, st); 270 } 271 list_for_each_entry(res, &dlm->reco.resources, recovering) { 272 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", 273 dlm->name, res->lockname.len, res->lockname.name); 274 } 275 } 276 277 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) 278 279 static int dlm_recovery_thread(void *data) 280 { 281 int status; 282 struct dlm_ctxt *dlm = data; 283 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); 284 285 mlog(0, "dlm thread running for %s...\n", dlm->name); 286 287 while (!kthread_should_stop()) { 288 if (dlm_domain_fully_joined(dlm)) { 289 status = dlm_do_recovery(dlm); 290 if (status == -EAGAIN) { 291 /* do not sleep, recheck immediately. */ 292 continue; 293 } 294 if (status < 0) 295 mlog_errno(status); 296 } 297 298 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 299 kthread_should_stop(), 300 timeout); 301 } 302 303 mlog(0, "quitting DLM recovery thread\n"); 304 return 0; 305 } 306 307 /* returns true when the recovery master has contacted us */ 308 static int dlm_reco_master_ready(struct dlm_ctxt *dlm) 309 { 310 int ready; 311 spin_lock(&dlm->spinlock); 312 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); 313 spin_unlock(&dlm->spinlock); 314 return ready; 315 } 316 317 /* returns true if node is no longer in the domain 318 * could be dead or just not joined */ 319 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) 320 { 321 int dead; 322 spin_lock(&dlm->spinlock); 323 dead = !test_bit(node, dlm->domain_map); 324 spin_unlock(&dlm->spinlock); 325 return dead; 326 } 327 328 /* returns true if node is no longer in the domain 329 * could be dead or just not joined */ 330 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) 331 { 332 int recovered; 333 spin_lock(&dlm->spinlock); 334 recovered = !test_bit(node, dlm->recovery_map); 335 spin_unlock(&dlm->spinlock); 336 return recovered; 337 } 338 339 340 void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 341 { 342 if (dlm_is_node_dead(dlm, node)) 343 return; 344 345 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " 346 "domain %s\n", node, dlm->name); 347 348 if (timeout) 349 wait_event_timeout(dlm->dlm_reco_thread_wq, 350 dlm_is_node_dead(dlm, node), 351 msecs_to_jiffies(timeout)); 352 else 353 wait_event(dlm->dlm_reco_thread_wq, 354 dlm_is_node_dead(dlm, node)); 355 } 356 357 void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 358 { 359 if (dlm_is_node_recovered(dlm, node)) 360 return; 361 362 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " 363 "domain %s\n", node, dlm->name); 364 365 if (timeout) 366 wait_event_timeout(dlm->dlm_reco_thread_wq, 367 dlm_is_node_recovered(dlm, node), 368 msecs_to_jiffies(timeout)); 369 else 370 wait_event(dlm->dlm_reco_thread_wq, 371 dlm_is_node_recovered(dlm, node)); 372 } 373 374 /* callers of the top-level api calls (dlmlock/dlmunlock) should 375 * block on the dlm->reco.event when recovery is in progress. 376 * the dlm recovery thread will set this state when it begins 377 * recovering a dead node (as the new master or not) and clear 378 * the state and wake as soon as all affected lock resources have 379 * been marked with the RECOVERY flag */ 380 static int dlm_in_recovery(struct dlm_ctxt *dlm) 381 { 382 int in_recovery; 383 spin_lock(&dlm->spinlock); 384 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 385 spin_unlock(&dlm->spinlock); 386 return in_recovery; 387 } 388 389 390 void dlm_wait_for_recovery(struct dlm_ctxt *dlm) 391 { 392 if (dlm_in_recovery(dlm)) { 393 mlog(0, "%s: reco thread %d in recovery: " 394 "state=%d, master=%u, dead=%u\n", 395 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 396 dlm->reco.state, dlm->reco.new_master, 397 dlm->reco.dead_node); 398 } 399 wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); 400 } 401 402 static void dlm_begin_recovery(struct dlm_ctxt *dlm) 403 { 404 assert_spin_locked(&dlm->spinlock); 405 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 406 printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n", 407 dlm->name, dlm->reco.dead_node); 408 dlm->reco.state |= DLM_RECO_STATE_ACTIVE; 409 } 410 411 static void dlm_end_recovery(struct dlm_ctxt *dlm) 412 { 413 spin_lock(&dlm->spinlock); 414 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); 415 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; 416 spin_unlock(&dlm->spinlock); 417 printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name); 418 wake_up(&dlm->reco.event); 419 } 420 421 static void dlm_print_recovery_master(struct dlm_ctxt *dlm) 422 { 423 printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the " 424 "dead node %u in domain %s\n", dlm->reco.new_master, 425 (dlm->node_num == dlm->reco.new_master ? "me" : "he"), 426 dlm->reco.dead_node, dlm->name); 427 } 428 429 static int dlm_do_recovery(struct dlm_ctxt *dlm) 430 { 431 int status = 0; 432 int ret; 433 434 spin_lock(&dlm->spinlock); 435 436 if (dlm->migrate_done) { 437 mlog(0, "%s: no need do recovery after migrating all " 438 "lock resources\n", dlm->name); 439 spin_unlock(&dlm->spinlock); 440 return 0; 441 } 442 443 /* check to see if the new master has died */ 444 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && 445 test_bit(dlm->reco.new_master, dlm->recovery_map)) { 446 mlog(0, "new master %u died while recovering %u!\n", 447 dlm->reco.new_master, dlm->reco.dead_node); 448 /* unset the new_master, leave dead_node */ 449 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 450 } 451 452 /* select a target to recover */ 453 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 454 int bit; 455 456 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0); 457 if (bit >= O2NM_MAX_NODES || bit < 0) 458 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 459 else 460 dlm_set_reco_dead_node(dlm, bit); 461 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { 462 /* BUG? */ 463 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", 464 dlm->reco.dead_node); 465 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 466 } 467 468 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 469 // mlog(0, "nothing to recover! sleeping now!\n"); 470 spin_unlock(&dlm->spinlock); 471 /* return to main thread loop and sleep. */ 472 return 0; 473 } 474 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", 475 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 476 dlm->reco.dead_node); 477 478 /* take write barrier */ 479 /* (stops the list reshuffling thread, proxy ast handling) */ 480 dlm_begin_recovery(dlm); 481 482 spin_unlock(&dlm->spinlock); 483 484 if (dlm->reco.new_master == dlm->node_num) 485 goto master_here; 486 487 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 488 /* choose a new master, returns 0 if this node 489 * is the master, -EEXIST if it's another node. 490 * this does not return until a new master is chosen 491 * or recovery completes entirely. */ 492 ret = dlm_pick_recovery_master(dlm); 493 if (!ret) { 494 /* already notified everyone. go. */ 495 goto master_here; 496 } 497 mlog(0, "another node will master this recovery session.\n"); 498 } 499 500 dlm_print_recovery_master(dlm); 501 502 /* it is safe to start everything back up here 503 * because all of the dead node's lock resources 504 * have been marked as in-recovery */ 505 dlm_end_recovery(dlm); 506 507 /* sleep out in main dlm_recovery_thread loop. */ 508 return 0; 509 510 master_here: 511 dlm_print_recovery_master(dlm); 512 513 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 514 if (status < 0) { 515 /* we should never hit this anymore */ 516 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, " 517 "retrying.\n", dlm->name, status, dlm->reco.dead_node); 518 /* yield a bit to allow any final network messages 519 * to get handled on remaining nodes */ 520 msleep(100); 521 } else { 522 /* success! see if any other nodes need recovery */ 523 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", 524 dlm->name, dlm->reco.dead_node, dlm->node_num); 525 spin_lock(&dlm->spinlock); 526 __dlm_reset_recovery(dlm); 527 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 528 spin_unlock(&dlm->spinlock); 529 } 530 dlm_end_recovery(dlm); 531 532 /* continue and look for another dead node */ 533 return -EAGAIN; 534 } 535 536 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) 537 { 538 int status = 0; 539 struct dlm_reco_node_data *ndata; 540 int all_nodes_done; 541 int destroy = 0; 542 int pass = 0; 543 544 do { 545 /* we have become recovery master. there is no escaping 546 * this, so just keep trying until we get it. */ 547 status = dlm_init_recovery_area(dlm, dead_node); 548 if (status < 0) { 549 mlog(ML_ERROR, "%s: failed to alloc recovery area, " 550 "retrying\n", dlm->name); 551 msleep(1000); 552 } 553 } while (status != 0); 554 555 /* safe to access the node data list without a lock, since this 556 * process is the only one to change the list */ 557 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 558 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); 559 ndata->state = DLM_RECO_NODE_DATA_REQUESTING; 560 561 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name, 562 ndata->node_num); 563 564 if (ndata->node_num == dlm->node_num) { 565 ndata->state = DLM_RECO_NODE_DATA_DONE; 566 continue; 567 } 568 569 do { 570 status = dlm_request_all_locks(dlm, ndata->node_num, 571 dead_node); 572 if (status < 0) { 573 mlog_errno(status); 574 if (dlm_is_host_down(status)) { 575 /* node died, ignore it for recovery */ 576 status = 0; 577 ndata->state = DLM_RECO_NODE_DATA_DEAD; 578 /* wait for the domain map to catch up 579 * with the network state. */ 580 wait_event_timeout(dlm->dlm_reco_thread_wq, 581 dlm_is_node_dead(dlm, 582 ndata->node_num), 583 msecs_to_jiffies(1000)); 584 mlog(0, "waited 1 sec for %u, " 585 "dead? %s\n", ndata->node_num, 586 dlm_is_node_dead(dlm, ndata->node_num) ? 587 "yes" : "no"); 588 } else { 589 /* -ENOMEM on the other node */ 590 mlog(0, "%s: node %u returned " 591 "%d during recovery, retrying " 592 "after a short wait\n", 593 dlm->name, ndata->node_num, 594 status); 595 msleep(100); 596 } 597 } 598 } while (status != 0); 599 600 spin_lock(&dlm_reco_state_lock); 601 switch (ndata->state) { 602 case DLM_RECO_NODE_DATA_INIT: 603 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 604 case DLM_RECO_NODE_DATA_REQUESTED: 605 BUG(); 606 break; 607 case DLM_RECO_NODE_DATA_DEAD: 608 mlog(0, "node %u died after requesting " 609 "recovery info for node %u\n", 610 ndata->node_num, dead_node); 611 /* fine. don't need this node's info. 612 * continue without it. */ 613 break; 614 case DLM_RECO_NODE_DATA_REQUESTING: 615 ndata->state = DLM_RECO_NODE_DATA_REQUESTED; 616 mlog(0, "now receiving recovery data from " 617 "node %u for dead node %u\n", 618 ndata->node_num, dead_node); 619 break; 620 case DLM_RECO_NODE_DATA_RECEIVING: 621 mlog(0, "already receiving recovery data from " 622 "node %u for dead node %u\n", 623 ndata->node_num, dead_node); 624 break; 625 case DLM_RECO_NODE_DATA_DONE: 626 mlog(0, "already DONE receiving recovery data " 627 "from node %u for dead node %u\n", 628 ndata->node_num, dead_node); 629 break; 630 } 631 spin_unlock(&dlm_reco_state_lock); 632 } 633 634 mlog(0, "%s: Done requesting all lock info\n", dlm->name); 635 636 /* nodes should be sending reco data now 637 * just need to wait */ 638 639 while (1) { 640 /* check all the nodes now to see if we are 641 * done, or if anyone died */ 642 all_nodes_done = 1; 643 spin_lock(&dlm_reco_state_lock); 644 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 645 mlog(0, "checking recovery state of node %u\n", 646 ndata->node_num); 647 switch (ndata->state) { 648 case DLM_RECO_NODE_DATA_INIT: 649 case DLM_RECO_NODE_DATA_REQUESTING: 650 mlog(ML_ERROR, "bad ndata state for " 651 "node %u: state=%d\n", 652 ndata->node_num, ndata->state); 653 BUG(); 654 break; 655 case DLM_RECO_NODE_DATA_DEAD: 656 mlog(0, "node %u died after " 657 "requesting recovery info for " 658 "node %u\n", ndata->node_num, 659 dead_node); 660 break; 661 case DLM_RECO_NODE_DATA_RECEIVING: 662 case DLM_RECO_NODE_DATA_REQUESTED: 663 mlog(0, "%s: node %u still in state %s\n", 664 dlm->name, ndata->node_num, 665 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? 666 "receiving" : "requested"); 667 all_nodes_done = 0; 668 break; 669 case DLM_RECO_NODE_DATA_DONE: 670 mlog(0, "%s: node %u state is done\n", 671 dlm->name, ndata->node_num); 672 break; 673 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 674 mlog(0, "%s: node %u state is finalize\n", 675 dlm->name, ndata->node_num); 676 break; 677 } 678 } 679 spin_unlock(&dlm_reco_state_lock); 680 681 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, 682 all_nodes_done?"yes":"no"); 683 if (all_nodes_done) { 684 int ret; 685 686 /* Set this flag on recovery master to avoid 687 * a new recovery for another dead node start 688 * before the recovery is not done. That may 689 * cause recovery hung.*/ 690 spin_lock(&dlm->spinlock); 691 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 692 spin_unlock(&dlm->spinlock); 693 694 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state 695 * just send a finalize message to everyone and 696 * clean up */ 697 mlog(0, "all nodes are done! send finalize\n"); 698 ret = dlm_send_finalize_reco_message(dlm); 699 if (ret < 0) 700 mlog_errno(ret); 701 702 spin_lock(&dlm->spinlock); 703 dlm_finish_local_lockres_recovery(dlm, dead_node, 704 dlm->node_num); 705 spin_unlock(&dlm->spinlock); 706 mlog(0, "should be done with recovery!\n"); 707 708 mlog(0, "finishing recovery of %s at %lu, " 709 "dead=%u, this=%u, new=%u\n", dlm->name, 710 jiffies, dlm->reco.dead_node, 711 dlm->node_num, dlm->reco.new_master); 712 destroy = 1; 713 status = 0; 714 /* rescan everything marked dirty along the way */ 715 dlm_kick_thread(dlm, NULL); 716 break; 717 } 718 /* wait to be signalled, with periodic timeout 719 * to check for node death */ 720 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 721 kthread_should_stop(), 722 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); 723 724 } 725 726 if (destroy) 727 dlm_destroy_recovery_area(dlm); 728 729 return status; 730 } 731 732 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 733 { 734 int num=0; 735 struct dlm_reco_node_data *ndata; 736 737 spin_lock(&dlm->spinlock); 738 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); 739 /* nodes can only be removed (by dying) after dropping 740 * this lock, and death will be trapped later, so this should do */ 741 spin_unlock(&dlm->spinlock); 742 743 while (1) { 744 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); 745 if (num >= O2NM_MAX_NODES) { 746 break; 747 } 748 BUG_ON(num == dead_node); 749 750 ndata = kzalloc(sizeof(*ndata), GFP_NOFS); 751 if (!ndata) { 752 dlm_destroy_recovery_area(dlm); 753 return -ENOMEM; 754 } 755 ndata->node_num = num; 756 ndata->state = DLM_RECO_NODE_DATA_INIT; 757 spin_lock(&dlm_reco_state_lock); 758 list_add_tail(&ndata->list, &dlm->reco.node_data); 759 spin_unlock(&dlm_reco_state_lock); 760 num++; 761 } 762 763 return 0; 764 } 765 766 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm) 767 { 768 struct dlm_reco_node_data *ndata, *next; 769 LIST_HEAD(tmplist); 770 771 spin_lock(&dlm_reco_state_lock); 772 list_splice_init(&dlm->reco.node_data, &tmplist); 773 spin_unlock(&dlm_reco_state_lock); 774 775 list_for_each_entry_safe(ndata, next, &tmplist, list) { 776 list_del_init(&ndata->list); 777 kfree(ndata); 778 } 779 } 780 781 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, 782 u8 dead_node) 783 { 784 struct dlm_lock_request lr; 785 int ret; 786 int status; 787 788 mlog(0, "\n"); 789 790 791 mlog(0, "dlm_request_all_locks: dead node is %u, sending request " 792 "to %u\n", dead_node, request_from); 793 794 memset(&lr, 0, sizeof(lr)); 795 lr.node_idx = dlm->node_num; 796 lr.dead_node = dead_node; 797 798 // send message 799 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, 800 &lr, sizeof(lr), request_from, &status); 801 802 /* negative status is handled by caller */ 803 if (ret < 0) 804 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " 805 "to recover dead node %u\n", dlm->name, ret, 806 request_from, dead_node); 807 else 808 ret = status; 809 // return from here, then 810 // sleep until all received or error 811 return ret; 812 813 } 814 815 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, 816 void **ret_data) 817 { 818 struct dlm_ctxt *dlm = data; 819 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; 820 char *buf = NULL; 821 struct dlm_work_item *item = NULL; 822 823 if (!dlm_grab(dlm)) 824 return -EINVAL; 825 826 if (lr->dead_node != dlm->reco.dead_node) { 827 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " 828 "dead_node is %u\n", dlm->name, lr->node_idx, 829 lr->dead_node, dlm->reco.dead_node); 830 dlm_print_reco_node_status(dlm); 831 /* this is a hack */ 832 dlm_put(dlm); 833 return -ENOMEM; 834 } 835 BUG_ON(lr->dead_node != dlm->reco.dead_node); 836 837 item = kzalloc(sizeof(*item), GFP_NOFS); 838 if (!item) { 839 dlm_put(dlm); 840 return -ENOMEM; 841 } 842 843 /* this will get freed by dlm_request_all_locks_worker */ 844 buf = (char *) __get_free_page(GFP_NOFS); 845 if (!buf) { 846 kfree(item); 847 dlm_put(dlm); 848 return -ENOMEM; 849 } 850 851 /* queue up work for dlm_request_all_locks_worker */ 852 dlm_grab(dlm); /* get an extra ref for the work item */ 853 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); 854 item->u.ral.reco_master = lr->node_idx; 855 item->u.ral.dead_node = lr->dead_node; 856 spin_lock(&dlm->work_lock); 857 list_add_tail(&item->list, &dlm->work_list); 858 spin_unlock(&dlm->work_lock); 859 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 860 861 dlm_put(dlm); 862 return 0; 863 } 864 865 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) 866 { 867 struct dlm_migratable_lockres *mres; 868 struct dlm_lock_resource *res; 869 struct dlm_ctxt *dlm; 870 LIST_HEAD(resources); 871 int ret; 872 u8 dead_node, reco_master; 873 int skip_all_done = 0; 874 875 dlm = item->dlm; 876 dead_node = item->u.ral.dead_node; 877 reco_master = item->u.ral.reco_master; 878 mres = (struct dlm_migratable_lockres *)data; 879 880 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", 881 dlm->name, dead_node, reco_master); 882 883 if (dead_node != dlm->reco.dead_node || 884 reco_master != dlm->reco.new_master) { 885 /* worker could have been created before the recovery master 886 * died. if so, do not continue, but do not error. */ 887 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 888 mlog(ML_NOTICE, "%s: will not send recovery state, " 889 "recovery master %u died, thread=(dead=%u,mas=%u)" 890 " current=(dead=%u,mas=%u)\n", dlm->name, 891 reco_master, dead_node, reco_master, 892 dlm->reco.dead_node, dlm->reco.new_master); 893 } else { 894 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " 895 "master=%u), request(dead=%u, master=%u)\n", 896 dlm->name, dlm->reco.dead_node, 897 dlm->reco.new_master, dead_node, reco_master); 898 } 899 goto leave; 900 } 901 902 /* lock resources should have already been moved to the 903 * dlm->reco.resources list. now move items from that list 904 * to a temp list if the dead owner matches. note that the 905 * whole cluster recovers only one node at a time, so we 906 * can safely move UNKNOWN lock resources for each recovery 907 * session. */ 908 dlm_move_reco_locks_to_list(dlm, &resources, dead_node); 909 910 /* now we can begin blasting lockreses without the dlm lock */ 911 912 /* any errors returned will be due to the new_master dying, 913 * the dlm_reco_thread should detect this */ 914 list_for_each_entry(res, &resources, recovering) { 915 ret = dlm_send_one_lockres(dlm, res, mres, reco_master, 916 DLM_MRES_RECOVERY); 917 if (ret < 0) { 918 mlog(ML_ERROR, "%s: node %u went down while sending " 919 "recovery state for dead node %u, ret=%d\n", dlm->name, 920 reco_master, dead_node, ret); 921 skip_all_done = 1; 922 break; 923 } 924 } 925 926 /* move the resources back to the list */ 927 spin_lock(&dlm->spinlock); 928 list_splice_init(&resources, &dlm->reco.resources); 929 spin_unlock(&dlm->spinlock); 930 931 if (!skip_all_done) { 932 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); 933 if (ret < 0) { 934 mlog(ML_ERROR, "%s: node %u went down while sending " 935 "recovery all-done for dead node %u, ret=%d\n", 936 dlm->name, reco_master, dead_node, ret); 937 } 938 } 939 leave: 940 free_page((unsigned long)data); 941 } 942 943 944 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) 945 { 946 int ret, tmpret; 947 struct dlm_reco_data_done done_msg; 948 949 memset(&done_msg, 0, sizeof(done_msg)); 950 done_msg.node_idx = dlm->node_num; 951 done_msg.dead_node = dead_node; 952 mlog(0, "sending DATA DONE message to %u, " 953 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, 954 done_msg.dead_node); 955 956 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 957 sizeof(done_msg), send_to, &tmpret); 958 if (ret < 0) { 959 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u " 960 "to recover dead node %u\n", dlm->name, ret, send_to, 961 dead_node); 962 if (!dlm_is_host_down(ret)) { 963 BUG(); 964 } 965 } else 966 ret = tmpret; 967 return ret; 968 } 969 970 971 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data, 972 void **ret_data) 973 { 974 struct dlm_ctxt *dlm = data; 975 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; 976 struct dlm_reco_node_data *ndata = NULL; 977 int ret = -EINVAL; 978 979 if (!dlm_grab(dlm)) 980 return -EINVAL; 981 982 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " 983 "node_idx=%u, this node=%u\n", done->dead_node, 984 dlm->reco.dead_node, done->node_idx, dlm->node_num); 985 986 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), 987 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " 988 "node_idx=%u, this node=%u\n", done->dead_node, 989 dlm->reco.dead_node, done->node_idx, dlm->node_num); 990 991 spin_lock(&dlm_reco_state_lock); 992 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 993 if (ndata->node_num != done->node_idx) 994 continue; 995 996 switch (ndata->state) { 997 /* should have moved beyond INIT but not to FINALIZE yet */ 998 case DLM_RECO_NODE_DATA_INIT: 999 case DLM_RECO_NODE_DATA_DEAD: 1000 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 1001 mlog(ML_ERROR, "bad ndata state for node %u:" 1002 " state=%d\n", ndata->node_num, 1003 ndata->state); 1004 BUG(); 1005 break; 1006 /* these states are possible at this point, anywhere along 1007 * the line of recovery */ 1008 case DLM_RECO_NODE_DATA_DONE: 1009 case DLM_RECO_NODE_DATA_RECEIVING: 1010 case DLM_RECO_NODE_DATA_REQUESTED: 1011 case DLM_RECO_NODE_DATA_REQUESTING: 1012 mlog(0, "node %u is DONE sending " 1013 "recovery data!\n", 1014 ndata->node_num); 1015 1016 ndata->state = DLM_RECO_NODE_DATA_DONE; 1017 ret = 0; 1018 break; 1019 } 1020 } 1021 spin_unlock(&dlm_reco_state_lock); 1022 1023 /* wake the recovery thread, some node is done */ 1024 if (!ret) 1025 dlm_kick_recovery_thread(dlm); 1026 1027 if (ret < 0) 1028 mlog(ML_ERROR, "failed to find recovery node data for node " 1029 "%u\n", done->node_idx); 1030 dlm_put(dlm); 1031 1032 mlog(0, "leaving reco data done handler, ret=%d\n", ret); 1033 return ret; 1034 } 1035 1036 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 1037 struct list_head *list, 1038 u8 dead_node) 1039 { 1040 struct dlm_lock_resource *res, *next; 1041 struct dlm_lock *lock; 1042 1043 spin_lock(&dlm->spinlock); 1044 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 1045 /* always prune any $RECOVERY entries for dead nodes, 1046 * otherwise hangs can occur during later recovery */ 1047 if (dlm_is_recovery_lock(res->lockname.name, 1048 res->lockname.len)) { 1049 spin_lock(&res->spinlock); 1050 list_for_each_entry(lock, &res->granted, list) { 1051 if (lock->ml.node == dead_node) { 1052 mlog(0, "AHA! there was " 1053 "a $RECOVERY lock for dead " 1054 "node %u (%s)!\n", 1055 dead_node, dlm->name); 1056 list_del_init(&lock->list); 1057 dlm_lock_put(lock); 1058 /* Can't schedule DLM_UNLOCK_FREE_LOCK 1059 * - do manually */ 1060 dlm_lock_put(lock); 1061 break; 1062 } 1063 } 1064 spin_unlock(&res->spinlock); 1065 continue; 1066 } 1067 1068 if (res->owner == dead_node) { 1069 mlog(0, "found lockres owned by dead node while " 1070 "doing recovery for node %u. sending it.\n", 1071 dead_node); 1072 list_move_tail(&res->recovering, list); 1073 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 1074 mlog(0, "found UNKNOWN owner while doing recovery " 1075 "for node %u. sending it.\n", dead_node); 1076 list_move_tail(&res->recovering, list); 1077 } 1078 } 1079 spin_unlock(&dlm->spinlock); 1080 } 1081 1082 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) 1083 { 1084 int total_locks = 0; 1085 struct list_head *iter, *queue = &res->granted; 1086 int i; 1087 1088 for (i=0; i<3; i++) { 1089 list_for_each(iter, queue) 1090 total_locks++; 1091 queue++; 1092 } 1093 return total_locks; 1094 } 1095 1096 1097 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 1098 struct dlm_migratable_lockres *mres, 1099 u8 send_to, 1100 struct dlm_lock_resource *res, 1101 int total_locks) 1102 { 1103 u64 mig_cookie = be64_to_cpu(mres->mig_cookie); 1104 int mres_total_locks = be32_to_cpu(mres->total_locks); 1105 int ret = 0, status = 0; 1106 u8 orig_flags = mres->flags, 1107 orig_master = mres->master; 1108 1109 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); 1110 if (!mres->num_locks) 1111 return 0; 1112 1113 /* add an all-done flag if we reached the last lock */ 1114 orig_flags = mres->flags; 1115 BUG_ON(total_locks > mres_total_locks); 1116 if (total_locks == mres_total_locks) 1117 mres->flags |= DLM_MRES_ALL_DONE; 1118 1119 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", 1120 dlm->name, res->lockname.len, res->lockname.name, 1121 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery", 1122 send_to); 1123 1124 /* send it */ 1125 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, 1126 struct_size(mres, ml, mres->num_locks), 1127 send_to, &status); 1128 if (ret < 0) { 1129 /* XXX: negative status is not handled. 1130 * this will end up killing this node. */ 1131 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to " 1132 "node %u (%s)\n", dlm->name, mres->lockname_len, 1133 mres->lockname, ret, send_to, 1134 (orig_flags & DLM_MRES_MIGRATION ? 1135 "migration" : "recovery")); 1136 } else { 1137 /* might get an -ENOMEM back here */ 1138 ret = status; 1139 if (ret < 0) { 1140 mlog_errno(ret); 1141 1142 if (ret == -EFAULT) { 1143 mlog(ML_ERROR, "node %u told me to kill " 1144 "myself!\n", send_to); 1145 BUG(); 1146 } 1147 } 1148 } 1149 1150 /* zero and reinit the message buffer */ 1151 dlm_init_migratable_lockres(mres, res->lockname.name, 1152 res->lockname.len, mres_total_locks, 1153 mig_cookie, orig_flags, orig_master); 1154 return ret; 1155 } 1156 1157 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 1158 const char *lockname, int namelen, 1159 int total_locks, u64 cookie, 1160 u8 flags, u8 master) 1161 { 1162 /* mres here is one full page */ 1163 clear_page(mres); 1164 mres->lockname_len = namelen; 1165 memcpy(mres->lockname, lockname, namelen); 1166 mres->num_locks = 0; 1167 mres->total_locks = cpu_to_be32(total_locks); 1168 mres->mig_cookie = cpu_to_be64(cookie); 1169 mres->flags = flags; 1170 mres->master = master; 1171 } 1172 1173 static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, 1174 struct dlm_migratable_lockres *mres, 1175 int queue) 1176 { 1177 if (!lock->lksb) 1178 return; 1179 1180 /* Ignore lvb in all locks in the blocked list */ 1181 if (queue == DLM_BLOCKED_LIST) 1182 return; 1183 1184 /* Only consider lvbs in locks with granted EX or PR lock levels */ 1185 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) 1186 return; 1187 1188 if (dlm_lvb_is_empty(mres->lvb)) { 1189 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); 1190 return; 1191 } 1192 1193 /* Ensure the lvb copied for migration matches in other valid locks */ 1194 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) 1195 return; 1196 1197 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " 1198 "node=%u\n", 1199 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), 1200 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), 1201 lock->lockres->lockname.len, lock->lockres->lockname.name, 1202 lock->ml.node); 1203 dlm_print_one_lock_resource(lock->lockres); 1204 BUG(); 1205 } 1206 1207 /* returns 1 if this lock fills the network structure, 1208 * 0 otherwise */ 1209 static int dlm_add_lock_to_array(struct dlm_lock *lock, 1210 struct dlm_migratable_lockres *mres, int queue) 1211 { 1212 struct dlm_migratable_lock *ml; 1213 int lock_num = mres->num_locks; 1214 1215 ml = &(mres->ml[lock_num]); 1216 ml->cookie = lock->ml.cookie; 1217 ml->type = lock->ml.type; 1218 ml->convert_type = lock->ml.convert_type; 1219 ml->highest_blocked = lock->ml.highest_blocked; 1220 ml->list = queue; 1221 if (lock->lksb) { 1222 ml->flags = lock->lksb->flags; 1223 dlm_prepare_lvb_for_migration(lock, mres, queue); 1224 } 1225 ml->node = lock->ml.node; 1226 mres->num_locks++; 1227 /* we reached the max, send this network message */ 1228 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) 1229 return 1; 1230 return 0; 1231 } 1232 1233 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm, 1234 struct dlm_migratable_lockres *mres) 1235 { 1236 struct dlm_lock dummy; 1237 memset(&dummy, 0, sizeof(dummy)); 1238 dummy.ml.cookie = 0; 1239 dummy.ml.type = LKM_IVMODE; 1240 dummy.ml.convert_type = LKM_IVMODE; 1241 dummy.ml.highest_blocked = LKM_IVMODE; 1242 dummy.lksb = NULL; 1243 dummy.ml.node = dlm->node_num; 1244 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST); 1245 } 1246 1247 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm, 1248 struct dlm_migratable_lock *ml, 1249 u8 *nodenum) 1250 { 1251 if (unlikely(ml->cookie == 0 && 1252 ml->type == LKM_IVMODE && 1253 ml->convert_type == LKM_IVMODE && 1254 ml->highest_blocked == LKM_IVMODE && 1255 ml->list == DLM_BLOCKED_LIST)) { 1256 *nodenum = ml->node; 1257 return 1; 1258 } 1259 return 0; 1260 } 1261 1262 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1263 struct dlm_migratable_lockres *mres, 1264 u8 send_to, u8 flags) 1265 { 1266 struct list_head *queue; 1267 int total_locks, i; 1268 u64 mig_cookie = 0; 1269 struct dlm_lock *lock; 1270 int ret = 0; 1271 1272 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1273 1274 mlog(0, "sending to %u\n", send_to); 1275 1276 total_locks = dlm_num_locks_in_lockres(res); 1277 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { 1278 /* rare, but possible */ 1279 mlog(0, "argh. lockres has %d locks. this will " 1280 "require more than one network packet to " 1281 "migrate\n", total_locks); 1282 mig_cookie = dlm_get_next_mig_cookie(); 1283 } 1284 1285 dlm_init_migratable_lockres(mres, res->lockname.name, 1286 res->lockname.len, total_locks, 1287 mig_cookie, flags, res->owner); 1288 1289 total_locks = 0; 1290 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { 1291 queue = dlm_list_idx_to_ptr(res, i); 1292 list_for_each_entry(lock, queue, list) { 1293 /* add another lock. */ 1294 total_locks++; 1295 if (!dlm_add_lock_to_array(lock, mres, i)) 1296 continue; 1297 1298 /* this filled the lock message, 1299 * we must send it immediately. */ 1300 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, 1301 res, total_locks); 1302 if (ret < 0) 1303 goto error; 1304 } 1305 } 1306 if (total_locks == 0) { 1307 /* send a dummy lock to indicate a mastery reference only */ 1308 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n", 1309 dlm->name, res->lockname.len, res->lockname.name, 1310 send_to, flags & DLM_MRES_RECOVERY ? "recovery" : 1311 "migration"); 1312 dlm_add_dummy_lock(dlm, mres); 1313 } 1314 /* flush any remaining locks */ 1315 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); 1316 if (ret < 0) 1317 goto error; 1318 return ret; 1319 1320 error: 1321 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", 1322 dlm->name, ret); 1323 if (!dlm_is_host_down(ret)) 1324 BUG(); 1325 mlog(0, "%s: node %u went down while sending %s " 1326 "lockres %.*s\n", dlm->name, send_to, 1327 flags & DLM_MRES_RECOVERY ? "recovery" : "migration", 1328 res->lockname.len, res->lockname.name); 1329 return ret; 1330 } 1331 1332 1333 1334 /* 1335 * this message will contain no more than one page worth of 1336 * recovery data, and it will work on only one lockres. 1337 * there may be many locks in this page, and we may need to wait 1338 * for additional packets to complete all the locks (rare, but 1339 * possible). 1340 */ 1341 /* 1342 * NOTE: the allocation error cases here are scary 1343 * we really cannot afford to fail an alloc in recovery 1344 * do we spin? returning an error only delays the problem really 1345 */ 1346 1347 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 1348 void **ret_data) 1349 { 1350 struct dlm_ctxt *dlm = data; 1351 struct dlm_migratable_lockres *mres = 1352 (struct dlm_migratable_lockres *)msg->buf; 1353 int ret = 0; 1354 u8 real_master; 1355 u8 extra_refs = 0; 1356 char *buf = NULL; 1357 struct dlm_work_item *item = NULL; 1358 struct dlm_lock_resource *res = NULL; 1359 unsigned int hash; 1360 1361 if (!dlm_grab(dlm)) 1362 return -EINVAL; 1363 1364 if (!dlm_joined(dlm)) { 1365 mlog(ML_ERROR, "Domain %s not joined! " 1366 "lockres %.*s, master %u\n", 1367 dlm->name, mres->lockname_len, 1368 mres->lockname, mres->master); 1369 dlm_put(dlm); 1370 return -EINVAL; 1371 } 1372 1373 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1374 1375 real_master = mres->master; 1376 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1377 /* cannot migrate a lockres with no master */ 1378 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1379 } 1380 1381 mlog(0, "%s message received from node %u\n", 1382 (mres->flags & DLM_MRES_RECOVERY) ? 1383 "recovery" : "migration", mres->master); 1384 if (mres->flags & DLM_MRES_ALL_DONE) 1385 mlog(0, "all done flag. all lockres data received!\n"); 1386 1387 ret = -ENOMEM; 1388 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); 1389 item = kzalloc(sizeof(*item), GFP_NOFS); 1390 if (!buf || !item) 1391 goto leave; 1392 1393 /* lookup the lock to see if we have a secondary queue for this 1394 * already... just add the locks in and this will have its owner 1395 * and RECOVERY flag changed when it completes. */ 1396 hash = dlm_lockid_hash(mres->lockname, mres->lockname_len); 1397 spin_lock(&dlm->spinlock); 1398 res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len, 1399 hash); 1400 if (res) { 1401 /* this will get a ref on res */ 1402 /* mark it as recovering/migrating and hash it */ 1403 spin_lock(&res->spinlock); 1404 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 1405 mlog(0, "%s: node is attempting to migrate " 1406 "lockres %.*s, but marked as dropping " 1407 " ref!\n", dlm->name, 1408 mres->lockname_len, mres->lockname); 1409 ret = -EINVAL; 1410 spin_unlock(&res->spinlock); 1411 spin_unlock(&dlm->spinlock); 1412 dlm_lockres_put(res); 1413 goto leave; 1414 } 1415 1416 if (mres->flags & DLM_MRES_RECOVERY) { 1417 res->state |= DLM_LOCK_RES_RECOVERING; 1418 } else { 1419 if (res->state & DLM_LOCK_RES_MIGRATING) { 1420 /* this is at least the second 1421 * lockres message */ 1422 mlog(0, "lock %.*s is already migrating\n", 1423 mres->lockname_len, 1424 mres->lockname); 1425 } else if (res->state & DLM_LOCK_RES_RECOVERING) { 1426 /* caller should BUG */ 1427 mlog(ML_ERROR, "node is attempting to migrate " 1428 "lock %.*s, but marked as recovering!\n", 1429 mres->lockname_len, mres->lockname); 1430 ret = -EFAULT; 1431 spin_unlock(&res->spinlock); 1432 spin_unlock(&dlm->spinlock); 1433 dlm_lockres_put(res); 1434 goto leave; 1435 } 1436 res->state |= DLM_LOCK_RES_MIGRATING; 1437 } 1438 spin_unlock(&res->spinlock); 1439 spin_unlock(&dlm->spinlock); 1440 } else { 1441 spin_unlock(&dlm->spinlock); 1442 /* need to allocate, just like if it was 1443 * mastered here normally */ 1444 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); 1445 if (!res) 1446 goto leave; 1447 1448 /* to match the ref that we would have gotten if 1449 * dlm_lookup_lockres had succeeded */ 1450 dlm_lockres_get(res); 1451 1452 /* mark it as recovering/migrating and hash it */ 1453 if (mres->flags & DLM_MRES_RECOVERY) 1454 res->state |= DLM_LOCK_RES_RECOVERING; 1455 else 1456 res->state |= DLM_LOCK_RES_MIGRATING; 1457 1458 spin_lock(&dlm->spinlock); 1459 __dlm_insert_lockres(dlm, res); 1460 spin_unlock(&dlm->spinlock); 1461 1462 /* Add an extra ref for this lock-less lockres lest the 1463 * dlm_thread purges it before we get the chance to add 1464 * locks to it */ 1465 dlm_lockres_get(res); 1466 1467 /* There are three refs that need to be put. 1468 * 1. Taken above. 1469 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres(). 1470 * 3. dlm_lookup_lockres() 1471 * The first one is handled at the end of this function. The 1472 * other two are handled in the worker thread after locks have 1473 * been attached. Yes, we don't wait for purge time to match 1474 * kref_init. The lockres will still have atleast one ref 1475 * added because it is in the hash __dlm_insert_lockres() */ 1476 extra_refs++; 1477 1478 /* now that the new lockres is inserted, 1479 * make it usable by other processes */ 1480 spin_lock(&res->spinlock); 1481 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1482 spin_unlock(&res->spinlock); 1483 wake_up(&res->wq); 1484 } 1485 1486 /* at this point we have allocated everything we need, 1487 * and we have a hashed lockres with an extra ref and 1488 * the proper res->state flags. */ 1489 ret = 0; 1490 spin_lock(&res->spinlock); 1491 /* drop this either when master requery finds a different master 1492 * or when a lock is added by the recovery worker */ 1493 dlm_lockres_grab_inflight_ref(dlm, res); 1494 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1495 /* migration cannot have an unknown master */ 1496 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1497 mlog(0, "recovery has passed me a lockres with an " 1498 "unknown owner.. will need to requery: " 1499 "%.*s\n", mres->lockname_len, mres->lockname); 1500 } else { 1501 /* take a reference now to pin the lockres, drop it 1502 * when locks are added in the worker */ 1503 dlm_change_lockres_owner(dlm, res, dlm->node_num); 1504 } 1505 spin_unlock(&res->spinlock); 1506 1507 /* queue up work for dlm_mig_lockres_worker */ 1508 dlm_grab(dlm); /* get an extra ref for the work item */ 1509 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ 1510 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); 1511 item->u.ml.lockres = res; /* already have a ref */ 1512 item->u.ml.real_master = real_master; 1513 item->u.ml.extra_ref = extra_refs; 1514 spin_lock(&dlm->work_lock); 1515 list_add_tail(&item->list, &dlm->work_list); 1516 spin_unlock(&dlm->work_lock); 1517 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 1518 1519 leave: 1520 /* One extra ref taken needs to be put here */ 1521 if (extra_refs) 1522 dlm_lockres_put(res); 1523 1524 dlm_put(dlm); 1525 if (ret < 0) { 1526 kfree(buf); 1527 kfree(item); 1528 mlog_errno(ret); 1529 } 1530 1531 return ret; 1532 } 1533 1534 1535 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) 1536 { 1537 struct dlm_ctxt *dlm; 1538 struct dlm_migratable_lockres *mres; 1539 int ret = 0; 1540 struct dlm_lock_resource *res; 1541 u8 real_master; 1542 u8 extra_ref; 1543 1544 dlm = item->dlm; 1545 mres = (struct dlm_migratable_lockres *)data; 1546 1547 res = item->u.ml.lockres; 1548 real_master = item->u.ml.real_master; 1549 extra_ref = item->u.ml.extra_ref; 1550 1551 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1552 /* this case is super-rare. only occurs if 1553 * node death happens during migration. */ 1554 again: 1555 ret = dlm_lockres_master_requery(dlm, res, &real_master); 1556 if (ret < 0) { 1557 mlog(0, "dlm_lockres_master_requery ret=%d\n", 1558 ret); 1559 goto again; 1560 } 1561 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1562 mlog(0, "lockres %.*s not claimed. " 1563 "this node will take it.\n", 1564 res->lockname.len, res->lockname.name); 1565 } else { 1566 spin_lock(&res->spinlock); 1567 dlm_lockres_drop_inflight_ref(dlm, res); 1568 spin_unlock(&res->spinlock); 1569 mlog(0, "master needs to respond to sender " 1570 "that node %u still owns %.*s\n", 1571 real_master, res->lockname.len, 1572 res->lockname.name); 1573 /* cannot touch this lockres */ 1574 goto leave; 1575 } 1576 } 1577 1578 ret = dlm_process_recovery_data(dlm, res, mres); 1579 if (ret < 0) 1580 mlog(0, "dlm_process_recovery_data returned %d\n", ret); 1581 else 1582 mlog(0, "dlm_process_recovery_data succeeded\n"); 1583 1584 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == 1585 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { 1586 ret = dlm_finish_migration(dlm, res, mres->master); 1587 if (ret < 0) 1588 mlog_errno(ret); 1589 } 1590 1591 leave: 1592 /* See comment in dlm_mig_lockres_handler() */ 1593 if (res) { 1594 if (extra_ref) 1595 dlm_lockres_put(res); 1596 dlm_lockres_put(res); 1597 } 1598 kfree(data); 1599 } 1600 1601 1602 1603 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 1604 struct dlm_lock_resource *res, 1605 u8 *real_master) 1606 { 1607 struct dlm_node_iter iter; 1608 int nodenum; 1609 int ret = 0; 1610 1611 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; 1612 1613 /* we only reach here if one of the two nodes in a 1614 * migration died while the migration was in progress. 1615 * at this point we need to requery the master. we 1616 * know that the new_master got as far as creating 1617 * an mle on at least one node, but we do not know 1618 * if any nodes had actually cleared the mle and set 1619 * the master to the new_master. the old master 1620 * is supposed to set the owner to UNKNOWN in the 1621 * event of a new_master death, so the only possible 1622 * responses that we can get from nodes here are 1623 * that the master is new_master, or that the master 1624 * is UNKNOWN. 1625 * if all nodes come back with UNKNOWN then we know 1626 * the lock needs remastering here. 1627 * if any node comes back with a valid master, check 1628 * to see if that master is the one that we are 1629 * recovering. if so, then the new_master died and 1630 * we need to remaster this lock. if not, then the 1631 * new_master survived and that node will respond to 1632 * other nodes about the owner. 1633 * if there is an owner, this node needs to dump this 1634 * lockres and alert the sender that this lockres 1635 * was rejected. */ 1636 spin_lock(&dlm->spinlock); 1637 dlm_node_iter_init(dlm->domain_map, &iter); 1638 spin_unlock(&dlm->spinlock); 1639 1640 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 1641 /* do not send to self */ 1642 if (nodenum == dlm->node_num) 1643 continue; 1644 ret = dlm_do_master_requery(dlm, res, nodenum, real_master); 1645 if (ret < 0) { 1646 mlog_errno(ret); 1647 if (!dlm_is_host_down(ret)) 1648 BUG(); 1649 /* host is down, so answer for that node would be 1650 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 1651 } 1652 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1653 mlog(0, "lock master is %u\n", *real_master); 1654 break; 1655 } 1656 } 1657 return ret; 1658 } 1659 1660 1661 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1662 u8 nodenum, u8 *real_master) 1663 { 1664 int ret; 1665 struct dlm_master_requery req; 1666 int status = DLM_LOCK_RES_OWNER_UNKNOWN; 1667 1668 memset(&req, 0, sizeof(req)); 1669 req.node_idx = dlm->node_num; 1670 req.namelen = res->lockname.len; 1671 memcpy(req.name, res->lockname.name, res->lockname.len); 1672 1673 resend: 1674 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, 1675 &req, sizeof(req), nodenum, &status); 1676 if (ret < 0) 1677 mlog(ML_ERROR, "Error %d when sending message %u (key " 1678 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG, 1679 dlm->key, nodenum); 1680 else if (status == -ENOMEM) { 1681 mlog_errno(status); 1682 msleep(50); 1683 goto resend; 1684 } else { 1685 BUG_ON(status < 0); 1686 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); 1687 *real_master = (u8) (status & 0xff); 1688 mlog(0, "node %u responded to master requery with %u\n", 1689 nodenum, *real_master); 1690 ret = 0; 1691 } 1692 return ret; 1693 } 1694 1695 1696 /* this function cannot error, so unless the sending 1697 * or receiving of the message failed, the owner can 1698 * be trusted */ 1699 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, 1700 void **ret_data) 1701 { 1702 struct dlm_ctxt *dlm = data; 1703 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; 1704 struct dlm_lock_resource *res = NULL; 1705 unsigned int hash; 1706 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1707 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1708 int dispatched = 0; 1709 1710 if (!dlm_grab(dlm)) { 1711 /* since the domain has gone away on this 1712 * node, the proper response is UNKNOWN */ 1713 return master; 1714 } 1715 1716 hash = dlm_lockid_hash(req->name, req->namelen); 1717 1718 spin_lock(&dlm->spinlock); 1719 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); 1720 if (res) { 1721 spin_lock(&res->spinlock); 1722 master = res->owner; 1723 if (master == dlm->node_num) { 1724 int ret = dlm_dispatch_assert_master(dlm, res, 1725 0, 0, flags); 1726 if (ret < 0) { 1727 mlog_errno(ret); 1728 spin_unlock(&res->spinlock); 1729 dlm_lockres_put(res); 1730 spin_unlock(&dlm->spinlock); 1731 dlm_put(dlm); 1732 /* sender will take care of this and retry */ 1733 return ret; 1734 } else { 1735 dispatched = 1; 1736 __dlm_lockres_grab_inflight_worker(dlm, res); 1737 spin_unlock(&res->spinlock); 1738 } 1739 } else { 1740 /* put.. incase we are not the master */ 1741 spin_unlock(&res->spinlock); 1742 dlm_lockres_put(res); 1743 } 1744 } 1745 spin_unlock(&dlm->spinlock); 1746 1747 if (!dispatched) 1748 dlm_put(dlm); 1749 return master; 1750 } 1751 1752 static inline struct list_head * 1753 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) 1754 { 1755 struct list_head *ret; 1756 BUG_ON(list_num < 0); 1757 BUG_ON(list_num > 2); 1758 ret = &(res->granted); 1759 ret += list_num; 1760 return ret; 1761 } 1762 /* TODO: do ast flush business 1763 * TODO: do MIGRATING and RECOVERING spinning 1764 */ 1765 1766 /* 1767 * NOTE about in-flight requests during migration: 1768 * 1769 * Before attempting the migrate, the master has marked the lockres as 1770 * MIGRATING and then flushed all of its pending ASTS. So any in-flight 1771 * requests either got queued before the MIGRATING flag got set, in which 1772 * case the lock data will reflect the change and a return message is on 1773 * the way, or the request failed to get in before MIGRATING got set. In 1774 * this case, the caller will be told to spin and wait for the MIGRATING 1775 * flag to be dropped, then recheck the master. 1776 * This holds true for the convert, cancel and unlock cases, and since lvb 1777 * updates are tied to these same messages, it applies to lvb updates as 1778 * well. For the lock case, there is no way a lock can be on the master 1779 * queue and not be on the secondary queue since the lock is always added 1780 * locally first. This means that the new target node will never be sent 1781 * a lock that he doesn't already have on the list. 1782 * In total, this means that the local lock is correct and should not be 1783 * updated to match the one sent by the master. Any messages sent back 1784 * from the master before the MIGRATING flag will bring the lock properly 1785 * up-to-date, and the change will be ordered properly for the waiter. 1786 * We will *not* attempt to modify the lock underneath the waiter. 1787 */ 1788 1789 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 1790 struct dlm_lock_resource *res, 1791 struct dlm_migratable_lockres *mres) 1792 { 1793 struct dlm_migratable_lock *ml; 1794 struct list_head *queue, *iter; 1795 struct list_head *tmpq = NULL; 1796 struct dlm_lock *newlock = NULL; 1797 struct dlm_lockstatus *lksb = NULL; 1798 int ret = 0; 1799 int i, j, bad; 1800 struct dlm_lock *lock; 1801 u8 from = O2NM_MAX_NODES; 1802 __be64 c; 1803 1804 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1805 for (i=0; i<mres->num_locks; i++) { 1806 ml = &(mres->ml[i]); 1807 1808 if (dlm_is_dummy_lock(dlm, ml, &from)) { 1809 /* placeholder, just need to set the refmap bit */ 1810 BUG_ON(mres->num_locks != 1); 1811 mlog(0, "%s:%.*s: dummy lock for %u\n", 1812 dlm->name, mres->lockname_len, mres->lockname, 1813 from); 1814 spin_lock(&res->spinlock); 1815 dlm_lockres_set_refmap_bit(dlm, res, from); 1816 spin_unlock(&res->spinlock); 1817 break; 1818 } 1819 BUG_ON(ml->highest_blocked != LKM_IVMODE); 1820 newlock = NULL; 1821 lksb = NULL; 1822 1823 queue = dlm_list_num_to_pointer(res, ml->list); 1824 tmpq = NULL; 1825 1826 /* if the lock is for the local node it needs to 1827 * be moved to the proper location within the queue. 1828 * do not allocate a new lock structure. */ 1829 if (ml->node == dlm->node_num) { 1830 /* MIGRATION ONLY! */ 1831 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); 1832 1833 lock = NULL; 1834 spin_lock(&res->spinlock); 1835 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { 1836 tmpq = dlm_list_idx_to_ptr(res, j); 1837 list_for_each(iter, tmpq) { 1838 lock = list_entry(iter, 1839 struct dlm_lock, list); 1840 if (lock->ml.cookie == ml->cookie) 1841 break; 1842 lock = NULL; 1843 } 1844 if (lock) 1845 break; 1846 } 1847 1848 /* lock is always created locally first, and 1849 * destroyed locally last. it must be on the list */ 1850 if (!lock) { 1851 c = ml->cookie; 1852 mlog(ML_ERROR, "Could not find local lock " 1853 "with cookie %u:%llu, node %u, " 1854 "list %u, flags 0x%x, type %d, " 1855 "conv %d, highest blocked %d\n", 1856 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1857 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1858 ml->node, ml->list, ml->flags, ml->type, 1859 ml->convert_type, ml->highest_blocked); 1860 __dlm_print_one_lock_resource(res); 1861 BUG(); 1862 } 1863 1864 if (lock->ml.node != ml->node) { 1865 c = lock->ml.cookie; 1866 mlog(ML_ERROR, "Mismatched node# in lock " 1867 "cookie %u:%llu, name %.*s, node %u\n", 1868 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1869 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1870 res->lockname.len, res->lockname.name, 1871 lock->ml.node); 1872 c = ml->cookie; 1873 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " 1874 "node %u, list %u, flags 0x%x, type %d, " 1875 "conv %d, highest blocked %d\n", 1876 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1877 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1878 ml->node, ml->list, ml->flags, ml->type, 1879 ml->convert_type, ml->highest_blocked); 1880 __dlm_print_one_lock_resource(res); 1881 BUG(); 1882 } 1883 1884 if (tmpq != queue) { 1885 c = ml->cookie; 1886 mlog(0, "Lock cookie %u:%llu was on list %u " 1887 "instead of list %u for %.*s\n", 1888 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1889 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1890 j, ml->list, res->lockname.len, 1891 res->lockname.name); 1892 __dlm_print_one_lock_resource(res); 1893 spin_unlock(&res->spinlock); 1894 continue; 1895 } 1896 1897 /* see NOTE above about why we do not update 1898 * to match the master here */ 1899 1900 /* move the lock to its proper place */ 1901 /* do not alter lock refcount. switching lists. */ 1902 list_move_tail(&lock->list, queue); 1903 spin_unlock(&res->spinlock); 1904 1905 mlog(0, "just reordered a local lock!\n"); 1906 continue; 1907 } 1908 1909 /* lock is for another node. */ 1910 newlock = dlm_new_lock(ml->type, ml->node, 1911 be64_to_cpu(ml->cookie), NULL); 1912 if (!newlock) { 1913 ret = -ENOMEM; 1914 goto leave; 1915 } 1916 lksb = newlock->lksb; 1917 dlm_lock_attach_lockres(newlock, res); 1918 1919 if (ml->convert_type != LKM_IVMODE) { 1920 BUG_ON(queue != &res->converting); 1921 newlock->ml.convert_type = ml->convert_type; 1922 } 1923 lksb->flags |= (ml->flags & 1924 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); 1925 1926 if (ml->type == LKM_NLMODE) 1927 goto skip_lvb; 1928 1929 /* 1930 * If the lock is in the blocked list it can't have a valid lvb, 1931 * so skip it 1932 */ 1933 if (ml->list == DLM_BLOCKED_LIST) 1934 goto skip_lvb; 1935 1936 if (!dlm_lvb_is_empty(mres->lvb)) { 1937 if (lksb->flags & DLM_LKSB_PUT_LVB) { 1938 /* other node was trying to update 1939 * lvb when node died. recreate the 1940 * lksb with the updated lvb. */ 1941 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); 1942 /* the lock resource lvb update must happen 1943 * NOW, before the spinlock is dropped. 1944 * we no longer wait for the AST to update 1945 * the lvb. */ 1946 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1947 } else { 1948 /* otherwise, the node is sending its 1949 * most recent valid lvb info */ 1950 BUG_ON(ml->type != LKM_EXMODE && 1951 ml->type != LKM_PRMODE); 1952 if (!dlm_lvb_is_empty(res->lvb) && 1953 (ml->type == LKM_EXMODE || 1954 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { 1955 int i; 1956 mlog(ML_ERROR, "%s:%.*s: received bad " 1957 "lvb! type=%d\n", dlm->name, 1958 res->lockname.len, 1959 res->lockname.name, ml->type); 1960 printk("lockres lvb=["); 1961 for (i=0; i<DLM_LVB_LEN; i++) 1962 printk("%02x", res->lvb[i]); 1963 printk("]\nmigrated lvb=["); 1964 for (i=0; i<DLM_LVB_LEN; i++) 1965 printk("%02x", mres->lvb[i]); 1966 printk("]\n"); 1967 dlm_print_one_lock_resource(res); 1968 BUG(); 1969 } 1970 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1971 } 1972 } 1973 skip_lvb: 1974 1975 /* NOTE: 1976 * wrt lock queue ordering and recovery: 1977 * 1. order of locks on granted queue is 1978 * meaningless. 1979 * 2. order of locks on converting queue is 1980 * LOST with the node death. sorry charlie. 1981 * 3. order of locks on the blocked queue is 1982 * also LOST. 1983 * order of locks does not affect integrity, it 1984 * just means that a lock request may get pushed 1985 * back in line as a result of the node death. 1986 * also note that for a given node the lock order 1987 * for its secondary queue locks is preserved 1988 * relative to each other, but clearly *not* 1989 * preserved relative to locks from other nodes. 1990 */ 1991 bad = 0; 1992 spin_lock(&res->spinlock); 1993 list_for_each_entry(lock, queue, list) { 1994 if (lock->ml.cookie == ml->cookie) { 1995 c = lock->ml.cookie; 1996 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1997 "exists on this lockres!\n", dlm->name, 1998 res->lockname.len, res->lockname.name, 1999 dlm_get_lock_cookie_node(be64_to_cpu(c)), 2000 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 2001 2002 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 2003 "node=%u, cookie=%u:%llu, queue=%d\n", 2004 ml->type, ml->convert_type, ml->node, 2005 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), 2006 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), 2007 ml->list); 2008 2009 __dlm_print_one_lock_resource(res); 2010 bad = 1; 2011 break; 2012 } 2013 } 2014 if (!bad) { 2015 dlm_lock_get(newlock); 2016 if (mres->flags & DLM_MRES_RECOVERY && 2017 ml->list == DLM_CONVERTING_LIST && 2018 newlock->ml.type > 2019 newlock->ml.convert_type) { 2020 /* newlock is doing downconvert, add it to the 2021 * head of converting list */ 2022 list_add(&newlock->list, queue); 2023 } else 2024 list_add_tail(&newlock->list, queue); 2025 mlog(0, "%s:%.*s: added lock for node %u, " 2026 "setting refmap bit\n", dlm->name, 2027 res->lockname.len, res->lockname.name, ml->node); 2028 dlm_lockres_set_refmap_bit(dlm, res, ml->node); 2029 } 2030 spin_unlock(&res->spinlock); 2031 } 2032 mlog(0, "done running all the locks\n"); 2033 2034 leave: 2035 /* balance the ref taken when the work was queued */ 2036 spin_lock(&res->spinlock); 2037 dlm_lockres_drop_inflight_ref(dlm, res); 2038 spin_unlock(&res->spinlock); 2039 2040 if (ret < 0) 2041 mlog_errno(ret); 2042 2043 return ret; 2044 } 2045 2046 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, 2047 struct dlm_lock_resource *res) 2048 { 2049 int i; 2050 struct list_head *queue; 2051 struct dlm_lock *lock, *next; 2052 2053 assert_spin_locked(&dlm->spinlock); 2054 assert_spin_locked(&res->spinlock); 2055 res->state |= DLM_LOCK_RES_RECOVERING; 2056 if (!list_empty(&res->recovering)) { 2057 mlog(0, 2058 "Recovering res %s:%.*s, is already on recovery list!\n", 2059 dlm->name, res->lockname.len, res->lockname.name); 2060 list_del_init(&res->recovering); 2061 dlm_lockres_put(res); 2062 } 2063 /* We need to hold a reference while on the recovery list */ 2064 dlm_lockres_get(res); 2065 list_add_tail(&res->recovering, &dlm->reco.resources); 2066 2067 /* find any pending locks and put them back on proper list */ 2068 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { 2069 queue = dlm_list_idx_to_ptr(res, i); 2070 list_for_each_entry_safe(lock, next, queue, list) { 2071 dlm_lock_get(lock); 2072 if (lock->convert_pending) { 2073 /* move converting lock back to granted */ 2074 mlog(0, "node died with convert pending " 2075 "on %.*s. move back to granted list.\n", 2076 res->lockname.len, res->lockname.name); 2077 dlm_revert_pending_convert(res, lock); 2078 lock->convert_pending = 0; 2079 } else if (lock->lock_pending) { 2080 /* remove pending lock requests completely */ 2081 BUG_ON(i != DLM_BLOCKED_LIST); 2082 mlog(0, "node died with lock pending " 2083 "on %.*s. remove from blocked list and skip.\n", 2084 res->lockname.len, res->lockname.name); 2085 /* lock will be floating until ref in 2086 * dlmlock_remote is freed after the network 2087 * call returns. ok for it to not be on any 2088 * list since no ast can be called 2089 * (the master is dead). */ 2090 dlm_revert_pending_lock(res, lock); 2091 lock->lock_pending = 0; 2092 } else if (lock->unlock_pending) { 2093 /* if an unlock was in progress, treat as 2094 * if this had completed successfully 2095 * before sending this lock state to the 2096 * new master. note that the dlm_unlock 2097 * call is still responsible for calling 2098 * the unlockast. that will happen after 2099 * the network call times out. for now, 2100 * just move lists to prepare the new 2101 * recovery master. */ 2102 BUG_ON(i != DLM_GRANTED_LIST); 2103 mlog(0, "node died with unlock pending " 2104 "on %.*s. remove from blocked list and skip.\n", 2105 res->lockname.len, res->lockname.name); 2106 dlm_commit_pending_unlock(res, lock); 2107 lock->unlock_pending = 0; 2108 } else if (lock->cancel_pending) { 2109 /* if a cancel was in progress, treat as 2110 * if this had completed successfully 2111 * before sending this lock state to the 2112 * new master */ 2113 BUG_ON(i != DLM_CONVERTING_LIST); 2114 mlog(0, "node died with cancel pending " 2115 "on %.*s. move back to granted list.\n", 2116 res->lockname.len, res->lockname.name); 2117 dlm_commit_pending_cancel(res, lock); 2118 lock->cancel_pending = 0; 2119 } 2120 dlm_lock_put(lock); 2121 } 2122 } 2123 } 2124 2125 2126 2127 /* removes all recovered locks from the recovery list. 2128 * sets the res->owner to the new master. 2129 * unsets the RECOVERY flag and wakes waiters. */ 2130 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 2131 u8 dead_node, u8 new_master) 2132 { 2133 int i; 2134 struct hlist_head *bucket; 2135 struct dlm_lock_resource *res, *next; 2136 2137 assert_spin_locked(&dlm->spinlock); 2138 2139 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2140 if (res->owner == dead_node) { 2141 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", 2142 dlm->name, res->lockname.len, res->lockname.name, 2143 res->owner, new_master); 2144 list_del_init(&res->recovering); 2145 spin_lock(&res->spinlock); 2146 /* new_master has our reference from 2147 * the lock state sent during recovery */ 2148 dlm_change_lockres_owner(dlm, res, new_master); 2149 res->state &= ~DLM_LOCK_RES_RECOVERING; 2150 if (__dlm_lockres_has_locks(res)) 2151 __dlm_dirty_lockres(dlm, res); 2152 spin_unlock(&res->spinlock); 2153 wake_up(&res->wq); 2154 dlm_lockres_put(res); 2155 } 2156 } 2157 2158 /* this will become unnecessary eventually, but 2159 * for now we need to run the whole hash, clear 2160 * the RECOVERING state and set the owner 2161 * if necessary */ 2162 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2163 bucket = dlm_lockres_hash(dlm, i); 2164 hlist_for_each_entry(res, bucket, hash_node) { 2165 if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) { 2166 spin_lock(&res->spinlock); 2167 res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING; 2168 spin_unlock(&res->spinlock); 2169 wake_up(&res->wq); 2170 } 2171 2172 if (!(res->state & DLM_LOCK_RES_RECOVERING)) 2173 continue; 2174 2175 if (res->owner != dead_node && 2176 res->owner != dlm->node_num) 2177 continue; 2178 2179 if (!list_empty(&res->recovering)) { 2180 list_del_init(&res->recovering); 2181 dlm_lockres_put(res); 2182 } 2183 2184 /* new_master has our reference from 2185 * the lock state sent during recovery */ 2186 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", 2187 dlm->name, res->lockname.len, res->lockname.name, 2188 res->owner, new_master); 2189 spin_lock(&res->spinlock); 2190 dlm_change_lockres_owner(dlm, res, new_master); 2191 res->state &= ~DLM_LOCK_RES_RECOVERING; 2192 if (__dlm_lockres_has_locks(res)) 2193 __dlm_dirty_lockres(dlm, res); 2194 spin_unlock(&res->spinlock); 2195 wake_up(&res->wq); 2196 } 2197 } 2198 } 2199 2200 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) 2201 { 2202 if (local) { 2203 if (lock->ml.type != LKM_EXMODE && 2204 lock->ml.type != LKM_PRMODE) 2205 return 1; 2206 } else if (lock->ml.type == LKM_EXMODE) 2207 return 1; 2208 return 0; 2209 } 2210 2211 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, 2212 struct dlm_lock_resource *res, u8 dead_node) 2213 { 2214 struct list_head *queue; 2215 struct dlm_lock *lock; 2216 int blank_lvb = 0, local = 0; 2217 int i; 2218 u8 search_node; 2219 2220 assert_spin_locked(&dlm->spinlock); 2221 assert_spin_locked(&res->spinlock); 2222 2223 if (res->owner == dlm->node_num) 2224 /* if this node owned the lockres, and if the dead node 2225 * had an EX when he died, blank out the lvb */ 2226 search_node = dead_node; 2227 else { 2228 /* if this is a secondary lockres, and we had no EX or PR 2229 * locks granted, we can no longer trust the lvb */ 2230 search_node = dlm->node_num; 2231 local = 1; /* check local state for valid lvb */ 2232 } 2233 2234 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { 2235 queue = dlm_list_idx_to_ptr(res, i); 2236 list_for_each_entry(lock, queue, list) { 2237 if (lock->ml.node == search_node) { 2238 if (dlm_lvb_needs_invalidation(lock, local)) { 2239 /* zero the lksb lvb and lockres lvb */ 2240 blank_lvb = 1; 2241 memset(lock->lksb->lvb, 0, DLM_LVB_LEN); 2242 } 2243 } 2244 } 2245 } 2246 2247 if (blank_lvb) { 2248 mlog(0, "clearing %.*s lvb, dead node %u had EX\n", 2249 res->lockname.len, res->lockname.name, dead_node); 2250 memset(res->lvb, 0, DLM_LVB_LEN); 2251 } 2252 } 2253 2254 static void dlm_free_dead_locks(struct dlm_ctxt *dlm, 2255 struct dlm_lock_resource *res, u8 dead_node) 2256 { 2257 struct dlm_lock *lock, *next; 2258 unsigned int freed = 0; 2259 2260 /* this node is the lockres master: 2261 * 1) remove any stale locks for the dead node 2262 * 2) if the dead node had an EX when he died, blank out the lvb 2263 */ 2264 assert_spin_locked(&dlm->spinlock); 2265 assert_spin_locked(&res->spinlock); 2266 2267 /* We do two dlm_lock_put(). One for removing from list and the other is 2268 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ 2269 2270 /* TODO: check pending_asts, pending_basts here */ 2271 list_for_each_entry_safe(lock, next, &res->granted, list) { 2272 if (lock->ml.node == dead_node) { 2273 list_del_init(&lock->list); 2274 dlm_lock_put(lock); 2275 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2276 dlm_lock_put(lock); 2277 freed++; 2278 } 2279 } 2280 list_for_each_entry_safe(lock, next, &res->converting, list) { 2281 if (lock->ml.node == dead_node) { 2282 list_del_init(&lock->list); 2283 dlm_lock_put(lock); 2284 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2285 dlm_lock_put(lock); 2286 freed++; 2287 } 2288 } 2289 list_for_each_entry_safe(lock, next, &res->blocked, list) { 2290 if (lock->ml.node == dead_node) { 2291 list_del_init(&lock->list); 2292 dlm_lock_put(lock); 2293 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2294 dlm_lock_put(lock); 2295 freed++; 2296 } 2297 } 2298 2299 if (freed) { 2300 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2301 "dropping ref from lockres\n", dlm->name, 2302 res->lockname.len, res->lockname.name, freed, dead_node); 2303 if(!test_bit(dead_node, res->refmap)) { 2304 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " 2305 "but ref was not set\n", dlm->name, 2306 res->lockname.len, res->lockname.name, freed, dead_node); 2307 __dlm_print_one_lock_resource(res); 2308 } 2309 res->state |= DLM_LOCK_RES_RECOVERY_WAITING; 2310 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2311 } else if (test_bit(dead_node, res->refmap)) { 2312 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2313 "no locks and had not purged before dying\n", dlm->name, 2314 res->lockname.len, res->lockname.name, dead_node); 2315 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2316 } 2317 2318 /* do not kick thread yet */ 2319 __dlm_dirty_lockres(dlm, res); 2320 } 2321 2322 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2323 { 2324 struct dlm_lock_resource *res; 2325 int i; 2326 struct hlist_head *bucket; 2327 struct hlist_node *tmp; 2328 struct dlm_lock *lock; 2329 2330 2331 /* purge any stale mles */ 2332 dlm_clean_master_list(dlm, dead_node); 2333 2334 /* 2335 * now clean up all lock resources. there are two rules: 2336 * 2337 * 1) if the dead node was the master, move the lockres 2338 * to the recovering list. set the RECOVERING flag. 2339 * this lockres needs to be cleaned up before it can 2340 * be used further. 2341 * 2342 * 2) if this node was the master, remove all locks from 2343 * each of the lockres queues that were owned by the 2344 * dead node. once recovery finishes, the dlm thread 2345 * can be kicked again to see if any ASTs or BASTs 2346 * need to be fired as a result. 2347 */ 2348 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2349 bucket = dlm_lockres_hash(dlm, i); 2350 hlist_for_each_entry_safe(res, tmp, bucket, hash_node) { 2351 /* always prune any $RECOVERY entries for dead nodes, 2352 * otherwise hangs can occur during later recovery */ 2353 if (dlm_is_recovery_lock(res->lockname.name, 2354 res->lockname.len)) { 2355 spin_lock(&res->spinlock); 2356 list_for_each_entry(lock, &res->granted, list) { 2357 if (lock->ml.node == dead_node) { 2358 mlog(0, "AHA! there was " 2359 "a $RECOVERY lock for dead " 2360 "node %u (%s)!\n", 2361 dead_node, dlm->name); 2362 list_del_init(&lock->list); 2363 dlm_lock_put(lock); 2364 /* Can't schedule 2365 * DLM_UNLOCK_FREE_LOCK 2366 * - do manually */ 2367 dlm_lock_put(lock); 2368 break; 2369 } 2370 } 2371 2372 if ((res->owner == dead_node) && 2373 (res->state & DLM_LOCK_RES_DROPPING_REF)) { 2374 dlm_lockres_get(res); 2375 __dlm_do_purge_lockres(dlm, res); 2376 spin_unlock(&res->spinlock); 2377 wake_up(&res->wq); 2378 dlm_lockres_put(res); 2379 continue; 2380 } else if (res->owner == dlm->node_num) 2381 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2382 spin_unlock(&res->spinlock); 2383 continue; 2384 } 2385 spin_lock(&res->spinlock); 2386 /* zero the lvb if necessary */ 2387 dlm_revalidate_lvb(dlm, res, dead_node); 2388 if (res->owner == dead_node) { 2389 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 2390 mlog(0, "%s:%.*s: owned by " 2391 "dead node %u, this node was " 2392 "dropping its ref when master died. " 2393 "continue, purging the lockres.\n", 2394 dlm->name, res->lockname.len, 2395 res->lockname.name, dead_node); 2396 dlm_lockres_get(res); 2397 __dlm_do_purge_lockres(dlm, res); 2398 spin_unlock(&res->spinlock); 2399 wake_up(&res->wq); 2400 dlm_lockres_put(res); 2401 continue; 2402 } 2403 dlm_move_lockres_to_recovery_list(dlm, res); 2404 } else if (res->owner == dlm->node_num) { 2405 dlm_free_dead_locks(dlm, res, dead_node); 2406 __dlm_lockres_calc_usage(dlm, res); 2407 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 2408 if (test_bit(dead_node, res->refmap)) { 2409 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2410 "no locks and had not purged before dying\n", 2411 dlm->name, res->lockname.len, 2412 res->lockname.name, dead_node); 2413 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2414 } 2415 } 2416 spin_unlock(&res->spinlock); 2417 } 2418 } 2419 2420 } 2421 2422 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) 2423 { 2424 assert_spin_locked(&dlm->spinlock); 2425 2426 if (dlm->reco.new_master == idx) { 2427 mlog(0, "%s: recovery master %d just died\n", 2428 dlm->name, idx); 2429 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2430 /* finalize1 was reached, so it is safe to clear 2431 * the new_master and dead_node. that recovery 2432 * is complete. */ 2433 mlog(0, "%s: dead master %d had reached " 2434 "finalize1 state, clearing\n", dlm->name, idx); 2435 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2436 __dlm_reset_recovery(dlm); 2437 } 2438 } 2439 2440 /* Clean up join state on node death. */ 2441 if (dlm->joining_node == idx) { 2442 mlog(0, "Clearing join state for node %u\n", idx); 2443 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 2444 } 2445 2446 /* check to see if the node is already considered dead */ 2447 if (!test_bit(idx, dlm->live_nodes_map)) { 2448 mlog(0, "for domain %s, node %d is already dead. " 2449 "another node likely did recovery already.\n", 2450 dlm->name, idx); 2451 return; 2452 } 2453 2454 /* check to see if we do not care about this node */ 2455 if (!test_bit(idx, dlm->domain_map)) { 2456 /* This also catches the case that we get a node down 2457 * but haven't joined the domain yet. */ 2458 mlog(0, "node %u already removed from domain!\n", idx); 2459 return; 2460 } 2461 2462 clear_bit(idx, dlm->live_nodes_map); 2463 2464 /* make sure local cleanup occurs before the heartbeat events */ 2465 if (!test_bit(idx, dlm->recovery_map)) 2466 dlm_do_local_recovery_cleanup(dlm, idx); 2467 2468 /* notify anything attached to the heartbeat events */ 2469 dlm_hb_event_notify_attached(dlm, idx, 0); 2470 2471 mlog(0, "node %u being removed from domain map!\n", idx); 2472 clear_bit(idx, dlm->domain_map); 2473 clear_bit(idx, dlm->exit_domain_map); 2474 /* wake up migration waiters if a node goes down. 2475 * perhaps later we can genericize this for other waiters. */ 2476 wake_up(&dlm->migration_wq); 2477 2478 set_bit(idx, dlm->recovery_map); 2479 } 2480 2481 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) 2482 { 2483 struct dlm_ctxt *dlm = data; 2484 2485 if (!dlm_grab(dlm)) 2486 return; 2487 2488 /* 2489 * This will notify any dlm users that a node in our domain 2490 * went away without notifying us first. 2491 */ 2492 if (test_bit(idx, dlm->domain_map)) 2493 dlm_fire_domain_eviction_callbacks(dlm, idx); 2494 2495 spin_lock(&dlm->spinlock); 2496 __dlm_hb_node_down(dlm, idx); 2497 spin_unlock(&dlm->spinlock); 2498 2499 dlm_put(dlm); 2500 } 2501 2502 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) 2503 { 2504 struct dlm_ctxt *dlm = data; 2505 2506 if (!dlm_grab(dlm)) 2507 return; 2508 2509 spin_lock(&dlm->spinlock); 2510 set_bit(idx, dlm->live_nodes_map); 2511 /* do NOT notify mle attached to the heartbeat events. 2512 * new nodes are not interesting in mastery until joined. */ 2513 spin_unlock(&dlm->spinlock); 2514 2515 dlm_put(dlm); 2516 } 2517 2518 static void dlm_reco_ast(void *astdata) 2519 { 2520 struct dlm_ctxt *dlm = astdata; 2521 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", 2522 dlm->node_num, dlm->name); 2523 } 2524 static void dlm_reco_bast(void *astdata, int blocked_type) 2525 { 2526 struct dlm_ctxt *dlm = astdata; 2527 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", 2528 dlm->node_num, dlm->name); 2529 } 2530 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) 2531 { 2532 mlog(0, "unlockast for recovery lock fired!\n"); 2533 } 2534 2535 /* 2536 * dlm_pick_recovery_master will continually attempt to use 2537 * dlmlock() on the special "$RECOVERY" lockres with the 2538 * LKM_NOQUEUE flag to get an EX. every thread that enters 2539 * this function on each node racing to become the recovery 2540 * master will not stop attempting this until either: 2541 * a) this node gets the EX (and becomes the recovery master), 2542 * or b) dlm->reco.new_master gets set to some nodenum 2543 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2544 * so each time a recovery master is needed, the entire cluster 2545 * will sync at this point. if the new master dies, that will 2546 * be detected in dlm_do_recovery */ 2547 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) 2548 { 2549 enum dlm_status ret; 2550 struct dlm_lockstatus lksb; 2551 int status = -EINVAL; 2552 2553 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2554 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2555 again: 2556 memset(&lksb, 0, sizeof(lksb)); 2557 2558 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2559 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, 2560 dlm_reco_ast, dlm, dlm_reco_bast); 2561 2562 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", 2563 dlm->name, ret, lksb.status); 2564 2565 if (ret == DLM_NORMAL) { 2566 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2567 dlm->name, dlm->node_num); 2568 2569 /* got the EX lock. check to see if another node 2570 * just became the reco master */ 2571 if (dlm_reco_master_ready(dlm)) { 2572 mlog(0, "%s: got reco EX lock, but %u will " 2573 "do the recovery\n", dlm->name, 2574 dlm->reco.new_master); 2575 status = -EEXIST; 2576 } else { 2577 status = 0; 2578 2579 /* see if recovery was already finished elsewhere */ 2580 spin_lock(&dlm->spinlock); 2581 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2582 status = -EINVAL; 2583 mlog(0, "%s: got reco EX lock, but " 2584 "node got recovered already\n", dlm->name); 2585 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2586 mlog(ML_ERROR, "%s: new master is %u " 2587 "but no dead node!\n", 2588 dlm->name, dlm->reco.new_master); 2589 BUG(); 2590 } 2591 } 2592 spin_unlock(&dlm->spinlock); 2593 } 2594 2595 /* if this node has actually become the recovery master, 2596 * set the master and send the messages to begin recovery */ 2597 if (!status) { 2598 mlog(0, "%s: dead=%u, this=%u, sending " 2599 "begin_reco now\n", dlm->name, 2600 dlm->reco.dead_node, dlm->node_num); 2601 status = dlm_send_begin_reco_message(dlm, 2602 dlm->reco.dead_node); 2603 /* this always succeeds */ 2604 BUG_ON(status); 2605 2606 /* set the new_master to this node */ 2607 spin_lock(&dlm->spinlock); 2608 dlm_set_reco_master(dlm, dlm->node_num); 2609 spin_unlock(&dlm->spinlock); 2610 } 2611 2612 /* recovery lock is a special case. ast will not get fired, 2613 * so just go ahead and unlock it. */ 2614 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); 2615 if (ret == DLM_DENIED) { 2616 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); 2617 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); 2618 } 2619 if (ret != DLM_NORMAL) { 2620 /* this would really suck. this could only happen 2621 * if there was a network error during the unlock 2622 * because of node death. this means the unlock 2623 * is actually "done" and the lock structure is 2624 * even freed. we can continue, but only 2625 * because this specific lock name is special. */ 2626 mlog(ML_ERROR, "dlmunlock returned %d\n", ret); 2627 } 2628 } else if (ret == DLM_NOTQUEUED) { 2629 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2630 dlm->name, dlm->node_num); 2631 /* another node is master. wait on 2632 * reco.new_master != O2NM_INVALID_NODE_NUM 2633 * for at most one second */ 2634 wait_event_timeout(dlm->dlm_reco_thread_wq, 2635 dlm_reco_master_ready(dlm), 2636 msecs_to_jiffies(1000)); 2637 if (!dlm_reco_master_ready(dlm)) { 2638 mlog(0, "%s: reco master taking awhile\n", 2639 dlm->name); 2640 goto again; 2641 } 2642 /* another node has informed this one that it is reco master */ 2643 mlog(0, "%s: reco master %u is ready to recover %u\n", 2644 dlm->name, dlm->reco.new_master, dlm->reco.dead_node); 2645 status = -EEXIST; 2646 } else if (ret == DLM_RECOVERING) { 2647 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", 2648 dlm->name, dlm->node_num); 2649 goto again; 2650 } else { 2651 struct dlm_lock_resource *res; 2652 2653 /* dlmlock returned something other than NOTQUEUED or NORMAL */ 2654 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " 2655 "lksb.status=%s\n", dlm->name, dlm_errname(ret), 2656 dlm_errname(lksb.status)); 2657 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2658 DLM_RECOVERY_LOCK_NAME_LEN); 2659 if (res) { 2660 dlm_print_one_lock_resource(res); 2661 dlm_lockres_put(res); 2662 } else { 2663 mlog(ML_ERROR, "recovery lock not found\n"); 2664 } 2665 BUG(); 2666 } 2667 2668 return status; 2669 } 2670 2671 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) 2672 { 2673 struct dlm_begin_reco br; 2674 int ret = 0; 2675 struct dlm_node_iter iter; 2676 int nodenum; 2677 int status; 2678 2679 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2680 2681 spin_lock(&dlm->spinlock); 2682 dlm_node_iter_init(dlm->domain_map, &iter); 2683 spin_unlock(&dlm->spinlock); 2684 2685 clear_bit(dead_node, iter.node_map); 2686 2687 memset(&br, 0, sizeof(br)); 2688 br.node_idx = dlm->node_num; 2689 br.dead_node = dead_node; 2690 2691 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2692 ret = 0; 2693 if (nodenum == dead_node) { 2694 mlog(0, "not sending begin reco to dead node " 2695 "%u\n", dead_node); 2696 continue; 2697 } 2698 if (nodenum == dlm->node_num) { 2699 mlog(0, "not sending begin reco to self\n"); 2700 continue; 2701 } 2702 retry: 2703 ret = -EINVAL; 2704 mlog(0, "attempting to send begin reco msg to %d\n", 2705 nodenum); 2706 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, 2707 &br, sizeof(br), nodenum, &status); 2708 /* negative status is handled ok by caller here */ 2709 if (ret >= 0) 2710 ret = status; 2711 if (dlm_is_host_down(ret)) { 2712 /* node is down. not involved in recovery 2713 * so just keep going */ 2714 mlog(ML_NOTICE, "%s: node %u was down when sending " 2715 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2716 ret = 0; 2717 } 2718 2719 /* 2720 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, 2721 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. 2722 * We are handling both for compatibility reasons. 2723 */ 2724 if (ret == -EAGAIN || ret == EAGAIN) { 2725 mlog(0, "%s: trying to start recovery of node " 2726 "%u, but node %u is waiting for last recovery " 2727 "to complete, backoff for a bit\n", dlm->name, 2728 dead_node, nodenum); 2729 msleep(100); 2730 goto retry; 2731 } 2732 if (ret < 0) { 2733 struct dlm_lock_resource *res; 2734 2735 /* this is now a serious problem, possibly ENOMEM 2736 * in the network stack. must retry */ 2737 mlog_errno(ret); 2738 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2739 "returned %d\n", dlm->name, nodenum, ret); 2740 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2741 DLM_RECOVERY_LOCK_NAME_LEN); 2742 if (res) { 2743 dlm_print_one_lock_resource(res); 2744 dlm_lockres_put(res); 2745 } else { 2746 mlog(ML_ERROR, "recovery lock not found\n"); 2747 } 2748 /* sleep for a bit in hopes that we can avoid 2749 * another ENOMEM */ 2750 msleep(100); 2751 goto retry; 2752 } 2753 } 2754 2755 return ret; 2756 } 2757 2758 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2759 void **ret_data) 2760 { 2761 struct dlm_ctxt *dlm = data; 2762 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; 2763 2764 /* ok to return 0, domain has gone away */ 2765 if (!dlm_grab(dlm)) 2766 return 0; 2767 2768 spin_lock(&dlm->spinlock); 2769 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2770 mlog(0, "%s: node %u wants to recover node %u (%u:%u) " 2771 "but this node is in finalize state, waiting on finalize2\n", 2772 dlm->name, br->node_idx, br->dead_node, 2773 dlm->reco.dead_node, dlm->reco.new_master); 2774 spin_unlock(&dlm->spinlock); 2775 dlm_put(dlm); 2776 return -EAGAIN; 2777 } 2778 spin_unlock(&dlm->spinlock); 2779 2780 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", 2781 dlm->name, br->node_idx, br->dead_node, 2782 dlm->reco.dead_node, dlm->reco.new_master); 2783 2784 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); 2785 2786 spin_lock(&dlm->spinlock); 2787 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2788 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { 2789 mlog(0, "%s: new_master %u died, changing " 2790 "to %u\n", dlm->name, dlm->reco.new_master, 2791 br->node_idx); 2792 } else { 2793 mlog(0, "%s: new_master %u NOT DEAD, changing " 2794 "to %u\n", dlm->name, dlm->reco.new_master, 2795 br->node_idx); 2796 /* may not have seen the new master as dead yet */ 2797 } 2798 } 2799 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2800 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2801 "node %u changing it to %u\n", dlm->name, 2802 dlm->reco.dead_node, br->node_idx, br->dead_node); 2803 } 2804 dlm_set_reco_master(dlm, br->node_idx); 2805 dlm_set_reco_dead_node(dlm, br->dead_node); 2806 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2807 mlog(0, "recovery master %u sees %u as dead, but this " 2808 "node has not yet. marking %u as dead\n", 2809 br->node_idx, br->dead_node, br->dead_node); 2810 if (!test_bit(br->dead_node, dlm->domain_map) || 2811 !test_bit(br->dead_node, dlm->live_nodes_map)) 2812 mlog(0, "%u not in domain/live_nodes map " 2813 "so setting it in reco map manually\n", 2814 br->dead_node); 2815 /* force the recovery cleanup in __dlm_hb_node_down 2816 * both of these will be cleared in a moment */ 2817 set_bit(br->dead_node, dlm->domain_map); 2818 set_bit(br->dead_node, dlm->live_nodes_map); 2819 __dlm_hb_node_down(dlm, br->dead_node); 2820 } 2821 spin_unlock(&dlm->spinlock); 2822 2823 dlm_kick_recovery_thread(dlm); 2824 2825 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", 2826 dlm->name, br->node_idx, br->dead_node, 2827 dlm->reco.dead_node, dlm->reco.new_master); 2828 2829 dlm_put(dlm); 2830 return 0; 2831 } 2832 2833 #define DLM_FINALIZE_STAGE2 0x01 2834 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) 2835 { 2836 int ret = 0; 2837 struct dlm_finalize_reco fr; 2838 struct dlm_node_iter iter; 2839 int nodenum; 2840 int status; 2841 int stage = 1; 2842 2843 mlog(0, "finishing recovery for node %s:%u, " 2844 "stage %d\n", dlm->name, dlm->reco.dead_node, stage); 2845 2846 spin_lock(&dlm->spinlock); 2847 dlm_node_iter_init(dlm->domain_map, &iter); 2848 spin_unlock(&dlm->spinlock); 2849 2850 stage2: 2851 memset(&fr, 0, sizeof(fr)); 2852 fr.node_idx = dlm->node_num; 2853 fr.dead_node = dlm->reco.dead_node; 2854 if (stage == 2) 2855 fr.flags |= DLM_FINALIZE_STAGE2; 2856 2857 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2858 if (nodenum == dlm->node_num) 2859 continue; 2860 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 2861 &fr, sizeof(fr), nodenum, &status); 2862 if (ret >= 0) 2863 ret = status; 2864 if (ret < 0) { 2865 mlog(ML_ERROR, "Error %d when sending message %u (key " 2866 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, 2867 dlm->key, nodenum); 2868 if (dlm_is_host_down(ret)) { 2869 /* this has no effect on this recovery 2870 * session, so set the status to zero to 2871 * finish out the last recovery */ 2872 mlog(ML_ERROR, "node %u went down after this " 2873 "node finished recovery.\n", nodenum); 2874 ret = 0; 2875 continue; 2876 } 2877 break; 2878 } 2879 } 2880 if (stage == 1) { 2881 /* reset the node_iter back to the top and send finalize2 */ 2882 iter.curnode = -1; 2883 stage = 2; 2884 goto stage2; 2885 } 2886 2887 return ret; 2888 } 2889 2890 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2891 void **ret_data) 2892 { 2893 struct dlm_ctxt *dlm = data; 2894 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2895 int stage = 1; 2896 2897 /* ok to return 0, domain has gone away */ 2898 if (!dlm_grab(dlm)) 2899 return 0; 2900 2901 if (fr->flags & DLM_FINALIZE_STAGE2) 2902 stage = 2; 2903 2904 mlog(0, "%s: node %u finalizing recovery stage%d of " 2905 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2906 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2907 2908 spin_lock(&dlm->spinlock); 2909 2910 if (dlm->reco.new_master != fr->node_idx) { 2911 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " 2912 "%u is supposed to be the new master, dead=%u\n", 2913 fr->node_idx, dlm->reco.new_master, fr->dead_node); 2914 BUG(); 2915 } 2916 if (dlm->reco.dead_node != fr->dead_node) { 2917 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " 2918 "node %u, but node %u is supposed to be dead\n", 2919 fr->node_idx, fr->dead_node, dlm->reco.dead_node); 2920 BUG(); 2921 } 2922 2923 switch (stage) { 2924 case 1: 2925 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); 2926 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2927 mlog(ML_ERROR, "%s: received finalize1 from " 2928 "new master %u for dead node %u, but " 2929 "this node has already received it!\n", 2930 dlm->name, fr->node_idx, fr->dead_node); 2931 dlm_print_reco_node_status(dlm); 2932 BUG(); 2933 } 2934 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 2935 spin_unlock(&dlm->spinlock); 2936 break; 2937 case 2: 2938 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { 2939 mlog(ML_ERROR, "%s: received finalize2 from " 2940 "new master %u for dead node %u, but " 2941 "this node did not have finalize1!\n", 2942 dlm->name, fr->node_idx, fr->dead_node); 2943 dlm_print_reco_node_status(dlm); 2944 BUG(); 2945 } 2946 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2947 __dlm_reset_recovery(dlm); 2948 spin_unlock(&dlm->spinlock); 2949 dlm_kick_recovery_thread(dlm); 2950 break; 2951 } 2952 2953 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", 2954 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); 2955 2956 dlm_put(dlm); 2957 return 0; 2958 } 2959