1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmrecovery.c 5 * 6 * recovery stuff 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/init.h> 34 #include <linux/sysctl.h> 35 #include <linux/random.h> 36 #include <linux/blkdev.h> 37 #include <linux/socket.h> 38 #include <linux/inet.h> 39 #include <linux/timer.h> 40 #include <linux/kthread.h> 41 #include <linux/delay.h> 42 43 44 #include "cluster/heartbeat.h" 45 #include "cluster/nodemanager.h" 46 #include "cluster/tcp.h" 47 48 #include "dlmapi.h" 49 #include "dlmcommon.h" 50 #include "dlmdomain.h" 51 52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) 53 #include "cluster/masklog.h" 54 55 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); 56 57 static int dlm_recovery_thread(void *data); 58 static int dlm_do_recovery(struct dlm_ctxt *dlm); 59 60 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); 61 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); 62 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 63 static int dlm_request_all_locks(struct dlm_ctxt *dlm, 64 u8 request_from, u8 dead_node); 65 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 66 67 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); 68 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 69 const char *lockname, int namelen, 70 int total_locks, u64 cookie, 71 u8 flags, u8 master); 72 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 73 struct dlm_migratable_lockres *mres, 74 u8 send_to, 75 struct dlm_lock_resource *res, 76 int total_locks); 77 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 78 struct dlm_lock_resource *res, 79 struct dlm_migratable_lockres *mres); 80 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); 81 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, 82 u8 dead_node, u8 send_to); 83 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); 84 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 85 struct list_head *list, u8 dead_node); 86 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 87 u8 dead_node, u8 new_master); 88 static void dlm_reco_ast(void *astdata); 89 static void dlm_reco_bast(void *astdata, int blocked_type); 90 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); 91 static void dlm_request_all_locks_worker(struct dlm_work_item *item, 92 void *data); 93 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); 94 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 95 struct dlm_lock_resource *res, 96 u8 *real_master); 97 98 static u64 dlm_get_next_mig_cookie(void); 99 100 static DEFINE_SPINLOCK(dlm_reco_state_lock); 101 static DEFINE_SPINLOCK(dlm_mig_cookie_lock); 102 static u64 dlm_mig_cookie = 1; 103 104 static u64 dlm_get_next_mig_cookie(void) 105 { 106 u64 c; 107 spin_lock(&dlm_mig_cookie_lock); 108 c = dlm_mig_cookie; 109 if (dlm_mig_cookie == (~0ULL)) 110 dlm_mig_cookie = 1; 111 else 112 dlm_mig_cookie++; 113 spin_unlock(&dlm_mig_cookie_lock); 114 return c; 115 } 116 117 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, 118 u8 dead_node) 119 { 120 assert_spin_locked(&dlm->spinlock); 121 if (dlm->reco.dead_node != dead_node) 122 mlog(0, "%s: changing dead_node from %u to %u\n", 123 dlm->name, dlm->reco.dead_node, dead_node); 124 dlm->reco.dead_node = dead_node; 125 } 126 127 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, 128 u8 master) 129 { 130 assert_spin_locked(&dlm->spinlock); 131 mlog(0, "%s: changing new_master from %u to %u\n", 132 dlm->name, dlm->reco.new_master, master); 133 dlm->reco.new_master = master; 134 } 135 136 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) 137 { 138 assert_spin_locked(&dlm->spinlock); 139 clear_bit(dlm->reco.dead_node, dlm->recovery_map); 140 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 141 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 142 } 143 144 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) 145 { 146 spin_lock(&dlm->spinlock); 147 __dlm_reset_recovery(dlm); 148 spin_unlock(&dlm->spinlock); 149 } 150 151 /* Worker function used during recovery. */ 152 void dlm_dispatch_work(struct work_struct *work) 153 { 154 struct dlm_ctxt *dlm = 155 container_of(work, struct dlm_ctxt, dispatched_work); 156 LIST_HEAD(tmp_list); 157 struct dlm_work_item *item, *next; 158 dlm_workfunc_t *workfunc; 159 int tot=0; 160 161 spin_lock(&dlm->work_lock); 162 list_splice_init(&dlm->work_list, &tmp_list); 163 spin_unlock(&dlm->work_lock); 164 165 list_for_each_entry(item, &tmp_list, list) { 166 tot++; 167 } 168 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); 169 170 list_for_each_entry_safe(item, next, &tmp_list, list) { 171 workfunc = item->func; 172 list_del_init(&item->list); 173 174 /* already have ref on dlm to avoid having 175 * it disappear. just double-check. */ 176 BUG_ON(item->dlm != dlm); 177 178 /* this is allowed to sleep and 179 * call network stuff */ 180 workfunc(item, item->data); 181 182 dlm_put(dlm); 183 kfree(item); 184 } 185 } 186 187 /* 188 * RECOVERY THREAD 189 */ 190 191 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) 192 { 193 /* wake the recovery thread 194 * this will wake the reco thread in one of three places 195 * 1) sleeping with no recovery happening 196 * 2) sleeping with recovery mastered elsewhere 197 * 3) recovery mastered here, waiting on reco data */ 198 199 wake_up(&dlm->dlm_reco_thread_wq); 200 } 201 202 /* Launch the recovery thread */ 203 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) 204 { 205 mlog(0, "starting dlm recovery thread...\n"); 206 207 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, 208 "dlm_reco-%s", dlm->name); 209 if (IS_ERR(dlm->dlm_reco_thread_task)) { 210 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); 211 dlm->dlm_reco_thread_task = NULL; 212 return -EINVAL; 213 } 214 215 return 0; 216 } 217 218 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) 219 { 220 if (dlm->dlm_reco_thread_task) { 221 mlog(0, "waiting for dlm recovery thread to exit\n"); 222 kthread_stop(dlm->dlm_reco_thread_task); 223 dlm->dlm_reco_thread_task = NULL; 224 } 225 } 226 227 228 229 /* 230 * this is lame, but here's how recovery works... 231 * 1) all recovery threads cluster wide will work on recovering 232 * ONE node at a time 233 * 2) negotiate who will take over all the locks for the dead node. 234 * thats right... ALL the locks. 235 * 3) once a new master is chosen, everyone scans all locks 236 * and moves aside those mastered by the dead guy 237 * 4) each of these locks should be locked until recovery is done 238 * 5) the new master collects up all of secondary lock queue info 239 * one lock at a time, forcing each node to communicate back 240 * before continuing 241 * 6) each secondary lock queue responds with the full known lock info 242 * 7) once the new master has run all its locks, it sends a ALLDONE! 243 * message to everyone 244 * 8) upon receiving this message, the secondary queue node unlocks 245 * and responds to the ALLDONE 246 * 9) once the new master gets responses from everyone, he unlocks 247 * everything and recovery for this dead node is done 248 *10) go back to 2) while there are still dead nodes 249 * 250 */ 251 252 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) 253 { 254 struct dlm_reco_node_data *ndata; 255 struct dlm_lock_resource *res; 256 257 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", 258 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 259 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", 260 dlm->reco.dead_node, dlm->reco.new_master); 261 262 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 263 char *st = "unknown"; 264 switch (ndata->state) { 265 case DLM_RECO_NODE_DATA_INIT: 266 st = "init"; 267 break; 268 case DLM_RECO_NODE_DATA_REQUESTING: 269 st = "requesting"; 270 break; 271 case DLM_RECO_NODE_DATA_DEAD: 272 st = "dead"; 273 break; 274 case DLM_RECO_NODE_DATA_RECEIVING: 275 st = "receiving"; 276 break; 277 case DLM_RECO_NODE_DATA_REQUESTED: 278 st = "requested"; 279 break; 280 case DLM_RECO_NODE_DATA_DONE: 281 st = "done"; 282 break; 283 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 284 st = "finalize-sent"; 285 break; 286 default: 287 st = "bad"; 288 break; 289 } 290 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", 291 dlm->name, ndata->node_num, st); 292 } 293 list_for_each_entry(res, &dlm->reco.resources, recovering) { 294 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", 295 dlm->name, res->lockname.len, res->lockname.name); 296 } 297 } 298 299 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) 300 301 static int dlm_recovery_thread(void *data) 302 { 303 int status; 304 struct dlm_ctxt *dlm = data; 305 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); 306 307 mlog(0, "dlm thread running for %s...\n", dlm->name); 308 309 while (!kthread_should_stop()) { 310 if (dlm_domain_fully_joined(dlm)) { 311 status = dlm_do_recovery(dlm); 312 if (status == -EAGAIN) { 313 /* do not sleep, recheck immediately. */ 314 continue; 315 } 316 if (status < 0) 317 mlog_errno(status); 318 } 319 320 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 321 kthread_should_stop(), 322 timeout); 323 } 324 325 mlog(0, "quitting DLM recovery thread\n"); 326 return 0; 327 } 328 329 /* returns true when the recovery master has contacted us */ 330 static int dlm_reco_master_ready(struct dlm_ctxt *dlm) 331 { 332 int ready; 333 spin_lock(&dlm->spinlock); 334 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); 335 spin_unlock(&dlm->spinlock); 336 return ready; 337 } 338 339 /* returns true if node is no longer in the domain 340 * could be dead or just not joined */ 341 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) 342 { 343 int dead; 344 spin_lock(&dlm->spinlock); 345 dead = !test_bit(node, dlm->domain_map); 346 spin_unlock(&dlm->spinlock); 347 return dead; 348 } 349 350 /* returns true if node is no longer in the domain 351 * could be dead or just not joined */ 352 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) 353 { 354 int recovered; 355 spin_lock(&dlm->spinlock); 356 recovered = !test_bit(node, dlm->recovery_map); 357 spin_unlock(&dlm->spinlock); 358 return recovered; 359 } 360 361 362 void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 363 { 364 if (dlm_is_node_dead(dlm, node)) 365 return; 366 367 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " 368 "domain %s\n", node, dlm->name); 369 370 if (timeout) 371 wait_event_timeout(dlm->dlm_reco_thread_wq, 372 dlm_is_node_dead(dlm, node), 373 msecs_to_jiffies(timeout)); 374 else 375 wait_event(dlm->dlm_reco_thread_wq, 376 dlm_is_node_dead(dlm, node)); 377 } 378 379 void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 380 { 381 if (dlm_is_node_recovered(dlm, node)) 382 return; 383 384 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " 385 "domain %s\n", node, dlm->name); 386 387 if (timeout) 388 wait_event_timeout(dlm->dlm_reco_thread_wq, 389 dlm_is_node_recovered(dlm, node), 390 msecs_to_jiffies(timeout)); 391 else 392 wait_event(dlm->dlm_reco_thread_wq, 393 dlm_is_node_recovered(dlm, node)); 394 } 395 396 /* callers of the top-level api calls (dlmlock/dlmunlock) should 397 * block on the dlm->reco.event when recovery is in progress. 398 * the dlm recovery thread will set this state when it begins 399 * recovering a dead node (as the new master or not) and clear 400 * the state and wake as soon as all affected lock resources have 401 * been marked with the RECOVERY flag */ 402 static int dlm_in_recovery(struct dlm_ctxt *dlm) 403 { 404 int in_recovery; 405 spin_lock(&dlm->spinlock); 406 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 407 spin_unlock(&dlm->spinlock); 408 return in_recovery; 409 } 410 411 412 void dlm_wait_for_recovery(struct dlm_ctxt *dlm) 413 { 414 if (dlm_in_recovery(dlm)) { 415 mlog(0, "%s: reco thread %d in recovery: " 416 "state=%d, master=%u, dead=%u\n", 417 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 418 dlm->reco.state, dlm->reco.new_master, 419 dlm->reco.dead_node); 420 } 421 wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); 422 } 423 424 static void dlm_begin_recovery(struct dlm_ctxt *dlm) 425 { 426 spin_lock(&dlm->spinlock); 427 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 428 printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n", 429 dlm->name, dlm->reco.dead_node); 430 dlm->reco.state |= DLM_RECO_STATE_ACTIVE; 431 spin_unlock(&dlm->spinlock); 432 } 433 434 static void dlm_end_recovery(struct dlm_ctxt *dlm) 435 { 436 spin_lock(&dlm->spinlock); 437 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); 438 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; 439 spin_unlock(&dlm->spinlock); 440 printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name); 441 wake_up(&dlm->reco.event); 442 } 443 444 static void dlm_print_recovery_master(struct dlm_ctxt *dlm) 445 { 446 printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the " 447 "dead node %u in domain %s\n", dlm->reco.new_master, 448 (dlm->node_num == dlm->reco.new_master ? "me" : "he"), 449 dlm->reco.dead_node, dlm->name); 450 } 451 452 static int dlm_do_recovery(struct dlm_ctxt *dlm) 453 { 454 int status = 0; 455 int ret; 456 457 spin_lock(&dlm->spinlock); 458 459 /* check to see if the new master has died */ 460 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && 461 test_bit(dlm->reco.new_master, dlm->recovery_map)) { 462 mlog(0, "new master %u died while recovering %u!\n", 463 dlm->reco.new_master, dlm->reco.dead_node); 464 /* unset the new_master, leave dead_node */ 465 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 466 } 467 468 /* select a target to recover */ 469 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 470 int bit; 471 472 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0); 473 if (bit >= O2NM_MAX_NODES || bit < 0) 474 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 475 else 476 dlm_set_reco_dead_node(dlm, bit); 477 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { 478 /* BUG? */ 479 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", 480 dlm->reco.dead_node); 481 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 482 } 483 484 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 485 // mlog(0, "nothing to recover! sleeping now!\n"); 486 spin_unlock(&dlm->spinlock); 487 /* return to main thread loop and sleep. */ 488 return 0; 489 } 490 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", 491 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 492 dlm->reco.dead_node); 493 spin_unlock(&dlm->spinlock); 494 495 /* take write barrier */ 496 /* (stops the list reshuffling thread, proxy ast handling) */ 497 dlm_begin_recovery(dlm); 498 499 if (dlm->reco.new_master == dlm->node_num) 500 goto master_here; 501 502 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 503 /* choose a new master, returns 0 if this node 504 * is the master, -EEXIST if it's another node. 505 * this does not return until a new master is chosen 506 * or recovery completes entirely. */ 507 ret = dlm_pick_recovery_master(dlm); 508 if (!ret) { 509 /* already notified everyone. go. */ 510 goto master_here; 511 } 512 mlog(0, "another node will master this recovery session.\n"); 513 } 514 515 dlm_print_recovery_master(dlm); 516 517 /* it is safe to start everything back up here 518 * because all of the dead node's lock resources 519 * have been marked as in-recovery */ 520 dlm_end_recovery(dlm); 521 522 /* sleep out in main dlm_recovery_thread loop. */ 523 return 0; 524 525 master_here: 526 dlm_print_recovery_master(dlm); 527 528 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 529 if (status < 0) { 530 /* we should never hit this anymore */ 531 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, " 532 "retrying.\n", dlm->name, status, dlm->reco.dead_node); 533 /* yield a bit to allow any final network messages 534 * to get handled on remaining nodes */ 535 msleep(100); 536 } else { 537 /* success! see if any other nodes need recovery */ 538 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", 539 dlm->name, dlm->reco.dead_node, dlm->node_num); 540 spin_lock(&dlm->spinlock); 541 __dlm_reset_recovery(dlm); 542 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 543 spin_unlock(&dlm->spinlock); 544 } 545 dlm_end_recovery(dlm); 546 547 /* continue and look for another dead node */ 548 return -EAGAIN; 549 } 550 551 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) 552 { 553 int status = 0; 554 struct dlm_reco_node_data *ndata; 555 int all_nodes_done; 556 int destroy = 0; 557 int pass = 0; 558 559 do { 560 /* we have become recovery master. there is no escaping 561 * this, so just keep trying until we get it. */ 562 status = dlm_init_recovery_area(dlm, dead_node); 563 if (status < 0) { 564 mlog(ML_ERROR, "%s: failed to alloc recovery area, " 565 "retrying\n", dlm->name); 566 msleep(1000); 567 } 568 } while (status != 0); 569 570 /* safe to access the node data list without a lock, since this 571 * process is the only one to change the list */ 572 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 573 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); 574 ndata->state = DLM_RECO_NODE_DATA_REQUESTING; 575 576 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name, 577 ndata->node_num); 578 579 if (ndata->node_num == dlm->node_num) { 580 ndata->state = DLM_RECO_NODE_DATA_DONE; 581 continue; 582 } 583 584 do { 585 status = dlm_request_all_locks(dlm, ndata->node_num, 586 dead_node); 587 if (status < 0) { 588 mlog_errno(status); 589 if (dlm_is_host_down(status)) { 590 /* node died, ignore it for recovery */ 591 status = 0; 592 ndata->state = DLM_RECO_NODE_DATA_DEAD; 593 /* wait for the domain map to catch up 594 * with the network state. */ 595 wait_event_timeout(dlm->dlm_reco_thread_wq, 596 dlm_is_node_dead(dlm, 597 ndata->node_num), 598 msecs_to_jiffies(1000)); 599 mlog(0, "waited 1 sec for %u, " 600 "dead? %s\n", ndata->node_num, 601 dlm_is_node_dead(dlm, ndata->node_num) ? 602 "yes" : "no"); 603 } else { 604 /* -ENOMEM on the other node */ 605 mlog(0, "%s: node %u returned " 606 "%d during recovery, retrying " 607 "after a short wait\n", 608 dlm->name, ndata->node_num, 609 status); 610 msleep(100); 611 } 612 } 613 } while (status != 0); 614 615 spin_lock(&dlm_reco_state_lock); 616 switch (ndata->state) { 617 case DLM_RECO_NODE_DATA_INIT: 618 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 619 case DLM_RECO_NODE_DATA_REQUESTED: 620 BUG(); 621 break; 622 case DLM_RECO_NODE_DATA_DEAD: 623 mlog(0, "node %u died after requesting " 624 "recovery info for node %u\n", 625 ndata->node_num, dead_node); 626 /* fine. don't need this node's info. 627 * continue without it. */ 628 break; 629 case DLM_RECO_NODE_DATA_REQUESTING: 630 ndata->state = DLM_RECO_NODE_DATA_REQUESTED; 631 mlog(0, "now receiving recovery data from " 632 "node %u for dead node %u\n", 633 ndata->node_num, dead_node); 634 break; 635 case DLM_RECO_NODE_DATA_RECEIVING: 636 mlog(0, "already receiving recovery data from " 637 "node %u for dead node %u\n", 638 ndata->node_num, dead_node); 639 break; 640 case DLM_RECO_NODE_DATA_DONE: 641 mlog(0, "already DONE receiving recovery data " 642 "from node %u for dead node %u\n", 643 ndata->node_num, dead_node); 644 break; 645 } 646 spin_unlock(&dlm_reco_state_lock); 647 } 648 649 mlog(0, "%s: Done requesting all lock info\n", dlm->name); 650 651 /* nodes should be sending reco data now 652 * just need to wait */ 653 654 while (1) { 655 /* check all the nodes now to see if we are 656 * done, or if anyone died */ 657 all_nodes_done = 1; 658 spin_lock(&dlm_reco_state_lock); 659 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 660 mlog(0, "checking recovery state of node %u\n", 661 ndata->node_num); 662 switch (ndata->state) { 663 case DLM_RECO_NODE_DATA_INIT: 664 case DLM_RECO_NODE_DATA_REQUESTING: 665 mlog(ML_ERROR, "bad ndata state for " 666 "node %u: state=%d\n", 667 ndata->node_num, ndata->state); 668 BUG(); 669 break; 670 case DLM_RECO_NODE_DATA_DEAD: 671 mlog(0, "node %u died after " 672 "requesting recovery info for " 673 "node %u\n", ndata->node_num, 674 dead_node); 675 break; 676 case DLM_RECO_NODE_DATA_RECEIVING: 677 case DLM_RECO_NODE_DATA_REQUESTED: 678 mlog(0, "%s: node %u still in state %s\n", 679 dlm->name, ndata->node_num, 680 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? 681 "receiving" : "requested"); 682 all_nodes_done = 0; 683 break; 684 case DLM_RECO_NODE_DATA_DONE: 685 mlog(0, "%s: node %u state is done\n", 686 dlm->name, ndata->node_num); 687 break; 688 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 689 mlog(0, "%s: node %u state is finalize\n", 690 dlm->name, ndata->node_num); 691 break; 692 } 693 } 694 spin_unlock(&dlm_reco_state_lock); 695 696 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, 697 all_nodes_done?"yes":"no"); 698 if (all_nodes_done) { 699 int ret; 700 701 /* Set this flag on recovery master to avoid 702 * a new recovery for another dead node start 703 * before the recovery is not done. That may 704 * cause recovery hung.*/ 705 spin_lock(&dlm->spinlock); 706 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 707 spin_unlock(&dlm->spinlock); 708 709 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state 710 * just send a finalize message to everyone and 711 * clean up */ 712 mlog(0, "all nodes are done! send finalize\n"); 713 ret = dlm_send_finalize_reco_message(dlm); 714 if (ret < 0) 715 mlog_errno(ret); 716 717 spin_lock(&dlm->spinlock); 718 dlm_finish_local_lockres_recovery(dlm, dead_node, 719 dlm->node_num); 720 spin_unlock(&dlm->spinlock); 721 mlog(0, "should be done with recovery!\n"); 722 723 mlog(0, "finishing recovery of %s at %lu, " 724 "dead=%u, this=%u, new=%u\n", dlm->name, 725 jiffies, dlm->reco.dead_node, 726 dlm->node_num, dlm->reco.new_master); 727 destroy = 1; 728 status = 0; 729 /* rescan everything marked dirty along the way */ 730 dlm_kick_thread(dlm, NULL); 731 break; 732 } 733 /* wait to be signalled, with periodic timeout 734 * to check for node death */ 735 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 736 kthread_should_stop(), 737 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); 738 739 } 740 741 if (destroy) 742 dlm_destroy_recovery_area(dlm, dead_node); 743 744 return status; 745 } 746 747 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 748 { 749 int num=0; 750 struct dlm_reco_node_data *ndata; 751 752 spin_lock(&dlm->spinlock); 753 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); 754 /* nodes can only be removed (by dying) after dropping 755 * this lock, and death will be trapped later, so this should do */ 756 spin_unlock(&dlm->spinlock); 757 758 while (1) { 759 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); 760 if (num >= O2NM_MAX_NODES) { 761 break; 762 } 763 BUG_ON(num == dead_node); 764 765 ndata = kzalloc(sizeof(*ndata), GFP_NOFS); 766 if (!ndata) { 767 dlm_destroy_recovery_area(dlm, dead_node); 768 return -ENOMEM; 769 } 770 ndata->node_num = num; 771 ndata->state = DLM_RECO_NODE_DATA_INIT; 772 spin_lock(&dlm_reco_state_lock); 773 list_add_tail(&ndata->list, &dlm->reco.node_data); 774 spin_unlock(&dlm_reco_state_lock); 775 num++; 776 } 777 778 return 0; 779 } 780 781 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 782 { 783 struct dlm_reco_node_data *ndata, *next; 784 LIST_HEAD(tmplist); 785 786 spin_lock(&dlm_reco_state_lock); 787 list_splice_init(&dlm->reco.node_data, &tmplist); 788 spin_unlock(&dlm_reco_state_lock); 789 790 list_for_each_entry_safe(ndata, next, &tmplist, list) { 791 list_del_init(&ndata->list); 792 kfree(ndata); 793 } 794 } 795 796 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, 797 u8 dead_node) 798 { 799 struct dlm_lock_request lr; 800 int ret; 801 int status; 802 803 mlog(0, "\n"); 804 805 806 mlog(0, "dlm_request_all_locks: dead node is %u, sending request " 807 "to %u\n", dead_node, request_from); 808 809 memset(&lr, 0, sizeof(lr)); 810 lr.node_idx = dlm->node_num; 811 lr.dead_node = dead_node; 812 813 // send message 814 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, 815 &lr, sizeof(lr), request_from, &status); 816 817 /* negative status is handled by caller */ 818 if (ret < 0) 819 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " 820 "to recover dead node %u\n", dlm->name, ret, 821 request_from, dead_node); 822 else 823 ret = status; 824 // return from here, then 825 // sleep until all received or error 826 return ret; 827 828 } 829 830 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, 831 void **ret_data) 832 { 833 struct dlm_ctxt *dlm = data; 834 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; 835 char *buf = NULL; 836 struct dlm_work_item *item = NULL; 837 838 if (!dlm_grab(dlm)) 839 return -EINVAL; 840 841 if (lr->dead_node != dlm->reco.dead_node) { 842 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " 843 "dead_node is %u\n", dlm->name, lr->node_idx, 844 lr->dead_node, dlm->reco.dead_node); 845 dlm_print_reco_node_status(dlm); 846 /* this is a hack */ 847 dlm_put(dlm); 848 return -ENOMEM; 849 } 850 BUG_ON(lr->dead_node != dlm->reco.dead_node); 851 852 item = kzalloc(sizeof(*item), GFP_NOFS); 853 if (!item) { 854 dlm_put(dlm); 855 return -ENOMEM; 856 } 857 858 /* this will get freed by dlm_request_all_locks_worker */ 859 buf = (char *) __get_free_page(GFP_NOFS); 860 if (!buf) { 861 kfree(item); 862 dlm_put(dlm); 863 return -ENOMEM; 864 } 865 866 /* queue up work for dlm_request_all_locks_worker */ 867 dlm_grab(dlm); /* get an extra ref for the work item */ 868 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); 869 item->u.ral.reco_master = lr->node_idx; 870 item->u.ral.dead_node = lr->dead_node; 871 spin_lock(&dlm->work_lock); 872 list_add_tail(&item->list, &dlm->work_list); 873 spin_unlock(&dlm->work_lock); 874 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 875 876 dlm_put(dlm); 877 return 0; 878 } 879 880 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) 881 { 882 struct dlm_migratable_lockres *mres; 883 struct dlm_lock_resource *res; 884 struct dlm_ctxt *dlm; 885 LIST_HEAD(resources); 886 int ret; 887 u8 dead_node, reco_master; 888 int skip_all_done = 0; 889 890 dlm = item->dlm; 891 dead_node = item->u.ral.dead_node; 892 reco_master = item->u.ral.reco_master; 893 mres = (struct dlm_migratable_lockres *)data; 894 895 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", 896 dlm->name, dead_node, reco_master); 897 898 if (dead_node != dlm->reco.dead_node || 899 reco_master != dlm->reco.new_master) { 900 /* worker could have been created before the recovery master 901 * died. if so, do not continue, but do not error. */ 902 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 903 mlog(ML_NOTICE, "%s: will not send recovery state, " 904 "recovery master %u died, thread=(dead=%u,mas=%u)" 905 " current=(dead=%u,mas=%u)\n", dlm->name, 906 reco_master, dead_node, reco_master, 907 dlm->reco.dead_node, dlm->reco.new_master); 908 } else { 909 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " 910 "master=%u), request(dead=%u, master=%u)\n", 911 dlm->name, dlm->reco.dead_node, 912 dlm->reco.new_master, dead_node, reco_master); 913 } 914 goto leave; 915 } 916 917 /* lock resources should have already been moved to the 918 * dlm->reco.resources list. now move items from that list 919 * to a temp list if the dead owner matches. note that the 920 * whole cluster recovers only one node at a time, so we 921 * can safely move UNKNOWN lock resources for each recovery 922 * session. */ 923 dlm_move_reco_locks_to_list(dlm, &resources, dead_node); 924 925 /* now we can begin blasting lockreses without the dlm lock */ 926 927 /* any errors returned will be due to the new_master dying, 928 * the dlm_reco_thread should detect this */ 929 list_for_each_entry(res, &resources, recovering) { 930 ret = dlm_send_one_lockres(dlm, res, mres, reco_master, 931 DLM_MRES_RECOVERY); 932 if (ret < 0) { 933 mlog(ML_ERROR, "%s: node %u went down while sending " 934 "recovery state for dead node %u, ret=%d\n", dlm->name, 935 reco_master, dead_node, ret); 936 skip_all_done = 1; 937 break; 938 } 939 } 940 941 /* move the resources back to the list */ 942 spin_lock(&dlm->spinlock); 943 list_splice_init(&resources, &dlm->reco.resources); 944 spin_unlock(&dlm->spinlock); 945 946 if (!skip_all_done) { 947 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); 948 if (ret < 0) { 949 mlog(ML_ERROR, "%s: node %u went down while sending " 950 "recovery all-done for dead node %u, ret=%d\n", 951 dlm->name, reco_master, dead_node, ret); 952 } 953 } 954 leave: 955 free_page((unsigned long)data); 956 } 957 958 959 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) 960 { 961 int ret, tmpret; 962 struct dlm_reco_data_done done_msg; 963 964 memset(&done_msg, 0, sizeof(done_msg)); 965 done_msg.node_idx = dlm->node_num; 966 done_msg.dead_node = dead_node; 967 mlog(0, "sending DATA DONE message to %u, " 968 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, 969 done_msg.dead_node); 970 971 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 972 sizeof(done_msg), send_to, &tmpret); 973 if (ret < 0) { 974 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u " 975 "to recover dead node %u\n", dlm->name, ret, send_to, 976 dead_node); 977 if (!dlm_is_host_down(ret)) { 978 BUG(); 979 } 980 } else 981 ret = tmpret; 982 return ret; 983 } 984 985 986 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data, 987 void **ret_data) 988 { 989 struct dlm_ctxt *dlm = data; 990 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; 991 struct dlm_reco_node_data *ndata = NULL; 992 int ret = -EINVAL; 993 994 if (!dlm_grab(dlm)) 995 return -EINVAL; 996 997 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " 998 "node_idx=%u, this node=%u\n", done->dead_node, 999 dlm->reco.dead_node, done->node_idx, dlm->node_num); 1000 1001 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), 1002 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " 1003 "node_idx=%u, this node=%u\n", done->dead_node, 1004 dlm->reco.dead_node, done->node_idx, dlm->node_num); 1005 1006 spin_lock(&dlm_reco_state_lock); 1007 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 1008 if (ndata->node_num != done->node_idx) 1009 continue; 1010 1011 switch (ndata->state) { 1012 /* should have moved beyond INIT but not to FINALIZE yet */ 1013 case DLM_RECO_NODE_DATA_INIT: 1014 case DLM_RECO_NODE_DATA_DEAD: 1015 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 1016 mlog(ML_ERROR, "bad ndata state for node %u:" 1017 " state=%d\n", ndata->node_num, 1018 ndata->state); 1019 BUG(); 1020 break; 1021 /* these states are possible at this point, anywhere along 1022 * the line of recovery */ 1023 case DLM_RECO_NODE_DATA_DONE: 1024 case DLM_RECO_NODE_DATA_RECEIVING: 1025 case DLM_RECO_NODE_DATA_REQUESTED: 1026 case DLM_RECO_NODE_DATA_REQUESTING: 1027 mlog(0, "node %u is DONE sending " 1028 "recovery data!\n", 1029 ndata->node_num); 1030 1031 ndata->state = DLM_RECO_NODE_DATA_DONE; 1032 ret = 0; 1033 break; 1034 } 1035 } 1036 spin_unlock(&dlm_reco_state_lock); 1037 1038 /* wake the recovery thread, some node is done */ 1039 if (!ret) 1040 dlm_kick_recovery_thread(dlm); 1041 1042 if (ret < 0) 1043 mlog(ML_ERROR, "failed to find recovery node data for node " 1044 "%u\n", done->node_idx); 1045 dlm_put(dlm); 1046 1047 mlog(0, "leaving reco data done handler, ret=%d\n", ret); 1048 return ret; 1049 } 1050 1051 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 1052 struct list_head *list, 1053 u8 dead_node) 1054 { 1055 struct dlm_lock_resource *res, *next; 1056 struct dlm_lock *lock; 1057 1058 spin_lock(&dlm->spinlock); 1059 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 1060 /* always prune any $RECOVERY entries for dead nodes, 1061 * otherwise hangs can occur during later recovery */ 1062 if (dlm_is_recovery_lock(res->lockname.name, 1063 res->lockname.len)) { 1064 spin_lock(&res->spinlock); 1065 list_for_each_entry(lock, &res->granted, list) { 1066 if (lock->ml.node == dead_node) { 1067 mlog(0, "AHA! there was " 1068 "a $RECOVERY lock for dead " 1069 "node %u (%s)!\n", 1070 dead_node, dlm->name); 1071 list_del_init(&lock->list); 1072 dlm_lock_put(lock); 1073 /* Can't schedule DLM_UNLOCK_FREE_LOCK 1074 * - do manually */ 1075 dlm_lock_put(lock); 1076 break; 1077 } 1078 } 1079 spin_unlock(&res->spinlock); 1080 continue; 1081 } 1082 1083 if (res->owner == dead_node) { 1084 mlog(0, "found lockres owned by dead node while " 1085 "doing recovery for node %u. sending it.\n", 1086 dead_node); 1087 list_move_tail(&res->recovering, list); 1088 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 1089 mlog(0, "found UNKNOWN owner while doing recovery " 1090 "for node %u. sending it.\n", dead_node); 1091 list_move_tail(&res->recovering, list); 1092 } 1093 } 1094 spin_unlock(&dlm->spinlock); 1095 } 1096 1097 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) 1098 { 1099 int total_locks = 0; 1100 struct list_head *iter, *queue = &res->granted; 1101 int i; 1102 1103 for (i=0; i<3; i++) { 1104 list_for_each(iter, queue) 1105 total_locks++; 1106 queue++; 1107 } 1108 return total_locks; 1109 } 1110 1111 1112 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 1113 struct dlm_migratable_lockres *mres, 1114 u8 send_to, 1115 struct dlm_lock_resource *res, 1116 int total_locks) 1117 { 1118 u64 mig_cookie = be64_to_cpu(mres->mig_cookie); 1119 int mres_total_locks = be32_to_cpu(mres->total_locks); 1120 int sz, ret = 0, status = 0; 1121 u8 orig_flags = mres->flags, 1122 orig_master = mres->master; 1123 1124 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); 1125 if (!mres->num_locks) 1126 return 0; 1127 1128 sz = sizeof(struct dlm_migratable_lockres) + 1129 (mres->num_locks * sizeof(struct dlm_migratable_lock)); 1130 1131 /* add an all-done flag if we reached the last lock */ 1132 orig_flags = mres->flags; 1133 BUG_ON(total_locks > mres_total_locks); 1134 if (total_locks == mres_total_locks) 1135 mres->flags |= DLM_MRES_ALL_DONE; 1136 1137 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", 1138 dlm->name, res->lockname.len, res->lockname.name, 1139 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery", 1140 send_to); 1141 1142 /* send it */ 1143 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, 1144 sz, send_to, &status); 1145 if (ret < 0) { 1146 /* XXX: negative status is not handled. 1147 * this will end up killing this node. */ 1148 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to " 1149 "node %u (%s)\n", dlm->name, mres->lockname_len, 1150 mres->lockname, ret, send_to, 1151 (orig_flags & DLM_MRES_MIGRATION ? 1152 "migration" : "recovery")); 1153 } else { 1154 /* might get an -ENOMEM back here */ 1155 ret = status; 1156 if (ret < 0) { 1157 mlog_errno(ret); 1158 1159 if (ret == -EFAULT) { 1160 mlog(ML_ERROR, "node %u told me to kill " 1161 "myself!\n", send_to); 1162 BUG(); 1163 } 1164 } 1165 } 1166 1167 /* zero and reinit the message buffer */ 1168 dlm_init_migratable_lockres(mres, res->lockname.name, 1169 res->lockname.len, mres_total_locks, 1170 mig_cookie, orig_flags, orig_master); 1171 return ret; 1172 } 1173 1174 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 1175 const char *lockname, int namelen, 1176 int total_locks, u64 cookie, 1177 u8 flags, u8 master) 1178 { 1179 /* mres here is one full page */ 1180 clear_page(mres); 1181 mres->lockname_len = namelen; 1182 memcpy(mres->lockname, lockname, namelen); 1183 mres->num_locks = 0; 1184 mres->total_locks = cpu_to_be32(total_locks); 1185 mres->mig_cookie = cpu_to_be64(cookie); 1186 mres->flags = flags; 1187 mres->master = master; 1188 } 1189 1190 static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, 1191 struct dlm_migratable_lockres *mres, 1192 int queue) 1193 { 1194 if (!lock->lksb) 1195 return; 1196 1197 /* Ignore lvb in all locks in the blocked list */ 1198 if (queue == DLM_BLOCKED_LIST) 1199 return; 1200 1201 /* Only consider lvbs in locks with granted EX or PR lock levels */ 1202 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) 1203 return; 1204 1205 if (dlm_lvb_is_empty(mres->lvb)) { 1206 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); 1207 return; 1208 } 1209 1210 /* Ensure the lvb copied for migration matches in other valid locks */ 1211 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) 1212 return; 1213 1214 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " 1215 "node=%u\n", 1216 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), 1217 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), 1218 lock->lockres->lockname.len, lock->lockres->lockname.name, 1219 lock->ml.node); 1220 dlm_print_one_lock_resource(lock->lockres); 1221 BUG(); 1222 } 1223 1224 /* returns 1 if this lock fills the network structure, 1225 * 0 otherwise */ 1226 static int dlm_add_lock_to_array(struct dlm_lock *lock, 1227 struct dlm_migratable_lockres *mres, int queue) 1228 { 1229 struct dlm_migratable_lock *ml; 1230 int lock_num = mres->num_locks; 1231 1232 ml = &(mres->ml[lock_num]); 1233 ml->cookie = lock->ml.cookie; 1234 ml->type = lock->ml.type; 1235 ml->convert_type = lock->ml.convert_type; 1236 ml->highest_blocked = lock->ml.highest_blocked; 1237 ml->list = queue; 1238 if (lock->lksb) { 1239 ml->flags = lock->lksb->flags; 1240 dlm_prepare_lvb_for_migration(lock, mres, queue); 1241 } 1242 ml->node = lock->ml.node; 1243 mres->num_locks++; 1244 /* we reached the max, send this network message */ 1245 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) 1246 return 1; 1247 return 0; 1248 } 1249 1250 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm, 1251 struct dlm_migratable_lockres *mres) 1252 { 1253 struct dlm_lock dummy; 1254 memset(&dummy, 0, sizeof(dummy)); 1255 dummy.ml.cookie = 0; 1256 dummy.ml.type = LKM_IVMODE; 1257 dummy.ml.convert_type = LKM_IVMODE; 1258 dummy.ml.highest_blocked = LKM_IVMODE; 1259 dummy.lksb = NULL; 1260 dummy.ml.node = dlm->node_num; 1261 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST); 1262 } 1263 1264 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm, 1265 struct dlm_migratable_lock *ml, 1266 u8 *nodenum) 1267 { 1268 if (unlikely(ml->cookie == 0 && 1269 ml->type == LKM_IVMODE && 1270 ml->convert_type == LKM_IVMODE && 1271 ml->highest_blocked == LKM_IVMODE && 1272 ml->list == DLM_BLOCKED_LIST)) { 1273 *nodenum = ml->node; 1274 return 1; 1275 } 1276 return 0; 1277 } 1278 1279 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1280 struct dlm_migratable_lockres *mres, 1281 u8 send_to, u8 flags) 1282 { 1283 struct list_head *queue; 1284 int total_locks, i; 1285 u64 mig_cookie = 0; 1286 struct dlm_lock *lock; 1287 int ret = 0; 1288 1289 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1290 1291 mlog(0, "sending to %u\n", send_to); 1292 1293 total_locks = dlm_num_locks_in_lockres(res); 1294 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { 1295 /* rare, but possible */ 1296 mlog(0, "argh. lockres has %d locks. this will " 1297 "require more than one network packet to " 1298 "migrate\n", total_locks); 1299 mig_cookie = dlm_get_next_mig_cookie(); 1300 } 1301 1302 dlm_init_migratable_lockres(mres, res->lockname.name, 1303 res->lockname.len, total_locks, 1304 mig_cookie, flags, res->owner); 1305 1306 total_locks = 0; 1307 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { 1308 queue = dlm_list_idx_to_ptr(res, i); 1309 list_for_each_entry(lock, queue, list) { 1310 /* add another lock. */ 1311 total_locks++; 1312 if (!dlm_add_lock_to_array(lock, mres, i)) 1313 continue; 1314 1315 /* this filled the lock message, 1316 * we must send it immediately. */ 1317 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, 1318 res, total_locks); 1319 if (ret < 0) 1320 goto error; 1321 } 1322 } 1323 if (total_locks == 0) { 1324 /* send a dummy lock to indicate a mastery reference only */ 1325 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n", 1326 dlm->name, res->lockname.len, res->lockname.name, 1327 send_to, flags & DLM_MRES_RECOVERY ? "recovery" : 1328 "migration"); 1329 dlm_add_dummy_lock(dlm, mres); 1330 } 1331 /* flush any remaining locks */ 1332 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); 1333 if (ret < 0) 1334 goto error; 1335 return ret; 1336 1337 error: 1338 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", 1339 dlm->name, ret); 1340 if (!dlm_is_host_down(ret)) 1341 BUG(); 1342 mlog(0, "%s: node %u went down while sending %s " 1343 "lockres %.*s\n", dlm->name, send_to, 1344 flags & DLM_MRES_RECOVERY ? "recovery" : "migration", 1345 res->lockname.len, res->lockname.name); 1346 return ret; 1347 } 1348 1349 1350 1351 /* 1352 * this message will contain no more than one page worth of 1353 * recovery data, and it will work on only one lockres. 1354 * there may be many locks in this page, and we may need to wait 1355 * for additional packets to complete all the locks (rare, but 1356 * possible). 1357 */ 1358 /* 1359 * NOTE: the allocation error cases here are scary 1360 * we really cannot afford to fail an alloc in recovery 1361 * do we spin? returning an error only delays the problem really 1362 */ 1363 1364 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 1365 void **ret_data) 1366 { 1367 struct dlm_ctxt *dlm = data; 1368 struct dlm_migratable_lockres *mres = 1369 (struct dlm_migratable_lockres *)msg->buf; 1370 int ret = 0; 1371 u8 real_master; 1372 u8 extra_refs = 0; 1373 char *buf = NULL; 1374 struct dlm_work_item *item = NULL; 1375 struct dlm_lock_resource *res = NULL; 1376 unsigned int hash; 1377 1378 if (!dlm_grab(dlm)) 1379 return -EINVAL; 1380 1381 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1382 1383 real_master = mres->master; 1384 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1385 /* cannot migrate a lockres with no master */ 1386 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1387 } 1388 1389 mlog(0, "%s message received from node %u\n", 1390 (mres->flags & DLM_MRES_RECOVERY) ? 1391 "recovery" : "migration", mres->master); 1392 if (mres->flags & DLM_MRES_ALL_DONE) 1393 mlog(0, "all done flag. all lockres data received!\n"); 1394 1395 ret = -ENOMEM; 1396 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); 1397 item = kzalloc(sizeof(*item), GFP_NOFS); 1398 if (!buf || !item) 1399 goto leave; 1400 1401 /* lookup the lock to see if we have a secondary queue for this 1402 * already... just add the locks in and this will have its owner 1403 * and RECOVERY flag changed when it completes. */ 1404 hash = dlm_lockid_hash(mres->lockname, mres->lockname_len); 1405 spin_lock(&dlm->spinlock); 1406 res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len, 1407 hash); 1408 if (res) { 1409 /* this will get a ref on res */ 1410 /* mark it as recovering/migrating and hash it */ 1411 spin_lock(&res->spinlock); 1412 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 1413 mlog(0, "%s: node is attempting to migrate " 1414 "lockres %.*s, but marked as dropping " 1415 " ref!\n", dlm->name, 1416 mres->lockname_len, mres->lockname); 1417 ret = -EINVAL; 1418 spin_unlock(&res->spinlock); 1419 spin_unlock(&dlm->spinlock); 1420 dlm_lockres_put(res); 1421 goto leave; 1422 } 1423 1424 if (mres->flags & DLM_MRES_RECOVERY) { 1425 res->state |= DLM_LOCK_RES_RECOVERING; 1426 } else { 1427 if (res->state & DLM_LOCK_RES_MIGRATING) { 1428 /* this is at least the second 1429 * lockres message */ 1430 mlog(0, "lock %.*s is already migrating\n", 1431 mres->lockname_len, 1432 mres->lockname); 1433 } else if (res->state & DLM_LOCK_RES_RECOVERING) { 1434 /* caller should BUG */ 1435 mlog(ML_ERROR, "node is attempting to migrate " 1436 "lock %.*s, but marked as recovering!\n", 1437 mres->lockname_len, mres->lockname); 1438 ret = -EFAULT; 1439 spin_unlock(&res->spinlock); 1440 spin_unlock(&dlm->spinlock); 1441 dlm_lockres_put(res); 1442 goto leave; 1443 } 1444 res->state |= DLM_LOCK_RES_MIGRATING; 1445 } 1446 spin_unlock(&res->spinlock); 1447 spin_unlock(&dlm->spinlock); 1448 } else { 1449 spin_unlock(&dlm->spinlock); 1450 /* need to allocate, just like if it was 1451 * mastered here normally */ 1452 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); 1453 if (!res) 1454 goto leave; 1455 1456 /* to match the ref that we would have gotten if 1457 * dlm_lookup_lockres had succeeded */ 1458 dlm_lockres_get(res); 1459 1460 /* mark it as recovering/migrating and hash it */ 1461 if (mres->flags & DLM_MRES_RECOVERY) 1462 res->state |= DLM_LOCK_RES_RECOVERING; 1463 else 1464 res->state |= DLM_LOCK_RES_MIGRATING; 1465 1466 spin_lock(&dlm->spinlock); 1467 __dlm_insert_lockres(dlm, res); 1468 spin_unlock(&dlm->spinlock); 1469 1470 /* Add an extra ref for this lock-less lockres lest the 1471 * dlm_thread purges it before we get the chance to add 1472 * locks to it */ 1473 dlm_lockres_get(res); 1474 1475 /* There are three refs that need to be put. 1476 * 1. Taken above. 1477 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres(). 1478 * 3. dlm_lookup_lockres() 1479 * The first one is handled at the end of this function. The 1480 * other two are handled in the worker thread after locks have 1481 * been attached. Yes, we don't wait for purge time to match 1482 * kref_init. The lockres will still have atleast one ref 1483 * added because it is in the hash __dlm_insert_lockres() */ 1484 extra_refs++; 1485 1486 /* now that the new lockres is inserted, 1487 * make it usable by other processes */ 1488 spin_lock(&res->spinlock); 1489 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1490 spin_unlock(&res->spinlock); 1491 wake_up(&res->wq); 1492 } 1493 1494 /* at this point we have allocated everything we need, 1495 * and we have a hashed lockres with an extra ref and 1496 * the proper res->state flags. */ 1497 ret = 0; 1498 spin_lock(&res->spinlock); 1499 /* drop this either when master requery finds a different master 1500 * or when a lock is added by the recovery worker */ 1501 dlm_lockres_grab_inflight_ref(dlm, res); 1502 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1503 /* migration cannot have an unknown master */ 1504 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1505 mlog(0, "recovery has passed me a lockres with an " 1506 "unknown owner.. will need to requery: " 1507 "%.*s\n", mres->lockname_len, mres->lockname); 1508 } else { 1509 /* take a reference now to pin the lockres, drop it 1510 * when locks are added in the worker */ 1511 dlm_change_lockres_owner(dlm, res, dlm->node_num); 1512 } 1513 spin_unlock(&res->spinlock); 1514 1515 /* queue up work for dlm_mig_lockres_worker */ 1516 dlm_grab(dlm); /* get an extra ref for the work item */ 1517 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ 1518 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); 1519 item->u.ml.lockres = res; /* already have a ref */ 1520 item->u.ml.real_master = real_master; 1521 item->u.ml.extra_ref = extra_refs; 1522 spin_lock(&dlm->work_lock); 1523 list_add_tail(&item->list, &dlm->work_list); 1524 spin_unlock(&dlm->work_lock); 1525 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 1526 1527 leave: 1528 /* One extra ref taken needs to be put here */ 1529 if (extra_refs) 1530 dlm_lockres_put(res); 1531 1532 dlm_put(dlm); 1533 if (ret < 0) { 1534 kfree(buf); 1535 kfree(item); 1536 mlog_errno(ret); 1537 } 1538 1539 return ret; 1540 } 1541 1542 1543 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) 1544 { 1545 struct dlm_ctxt *dlm; 1546 struct dlm_migratable_lockres *mres; 1547 int ret = 0; 1548 struct dlm_lock_resource *res; 1549 u8 real_master; 1550 u8 extra_ref; 1551 1552 dlm = item->dlm; 1553 mres = (struct dlm_migratable_lockres *)data; 1554 1555 res = item->u.ml.lockres; 1556 real_master = item->u.ml.real_master; 1557 extra_ref = item->u.ml.extra_ref; 1558 1559 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1560 /* this case is super-rare. only occurs if 1561 * node death happens during migration. */ 1562 again: 1563 ret = dlm_lockres_master_requery(dlm, res, &real_master); 1564 if (ret < 0) { 1565 mlog(0, "dlm_lockres_master_requery ret=%d\n", 1566 ret); 1567 goto again; 1568 } 1569 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1570 mlog(0, "lockres %.*s not claimed. " 1571 "this node will take it.\n", 1572 res->lockname.len, res->lockname.name); 1573 } else { 1574 spin_lock(&res->spinlock); 1575 dlm_lockres_drop_inflight_ref(dlm, res); 1576 spin_unlock(&res->spinlock); 1577 mlog(0, "master needs to respond to sender " 1578 "that node %u still owns %.*s\n", 1579 real_master, res->lockname.len, 1580 res->lockname.name); 1581 /* cannot touch this lockres */ 1582 goto leave; 1583 } 1584 } 1585 1586 ret = dlm_process_recovery_data(dlm, res, mres); 1587 if (ret < 0) 1588 mlog(0, "dlm_process_recovery_data returned %d\n", ret); 1589 else 1590 mlog(0, "dlm_process_recovery_data succeeded\n"); 1591 1592 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == 1593 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { 1594 ret = dlm_finish_migration(dlm, res, mres->master); 1595 if (ret < 0) 1596 mlog_errno(ret); 1597 } 1598 1599 leave: 1600 /* See comment in dlm_mig_lockres_handler() */ 1601 if (res) { 1602 if (extra_ref) 1603 dlm_lockres_put(res); 1604 dlm_lockres_put(res); 1605 } 1606 kfree(data); 1607 } 1608 1609 1610 1611 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 1612 struct dlm_lock_resource *res, 1613 u8 *real_master) 1614 { 1615 struct dlm_node_iter iter; 1616 int nodenum; 1617 int ret = 0; 1618 1619 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; 1620 1621 /* we only reach here if one of the two nodes in a 1622 * migration died while the migration was in progress. 1623 * at this point we need to requery the master. we 1624 * know that the new_master got as far as creating 1625 * an mle on at least one node, but we do not know 1626 * if any nodes had actually cleared the mle and set 1627 * the master to the new_master. the old master 1628 * is supposed to set the owner to UNKNOWN in the 1629 * event of a new_master death, so the only possible 1630 * responses that we can get from nodes here are 1631 * that the master is new_master, or that the master 1632 * is UNKNOWN. 1633 * if all nodes come back with UNKNOWN then we know 1634 * the lock needs remastering here. 1635 * if any node comes back with a valid master, check 1636 * to see if that master is the one that we are 1637 * recovering. if so, then the new_master died and 1638 * we need to remaster this lock. if not, then the 1639 * new_master survived and that node will respond to 1640 * other nodes about the owner. 1641 * if there is an owner, this node needs to dump this 1642 * lockres and alert the sender that this lockres 1643 * was rejected. */ 1644 spin_lock(&dlm->spinlock); 1645 dlm_node_iter_init(dlm->domain_map, &iter); 1646 spin_unlock(&dlm->spinlock); 1647 1648 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 1649 /* do not send to self */ 1650 if (nodenum == dlm->node_num) 1651 continue; 1652 ret = dlm_do_master_requery(dlm, res, nodenum, real_master); 1653 if (ret < 0) { 1654 mlog_errno(ret); 1655 if (!dlm_is_host_down(ret)) 1656 BUG(); 1657 /* host is down, so answer for that node would be 1658 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 1659 } 1660 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1661 mlog(0, "lock master is %u\n", *real_master); 1662 break; 1663 } 1664 } 1665 return ret; 1666 } 1667 1668 1669 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1670 u8 nodenum, u8 *real_master) 1671 { 1672 int ret = -EINVAL; 1673 struct dlm_master_requery req; 1674 int status = DLM_LOCK_RES_OWNER_UNKNOWN; 1675 1676 memset(&req, 0, sizeof(req)); 1677 req.node_idx = dlm->node_num; 1678 req.namelen = res->lockname.len; 1679 memcpy(req.name, res->lockname.name, res->lockname.len); 1680 1681 resend: 1682 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, 1683 &req, sizeof(req), nodenum, &status); 1684 if (ret < 0) 1685 mlog(ML_ERROR, "Error %d when sending message %u (key " 1686 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG, 1687 dlm->key, nodenum); 1688 else if (status == -ENOMEM) { 1689 mlog_errno(status); 1690 msleep(50); 1691 goto resend; 1692 } else { 1693 BUG_ON(status < 0); 1694 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); 1695 *real_master = (u8) (status & 0xff); 1696 mlog(0, "node %u responded to master requery with %u\n", 1697 nodenum, *real_master); 1698 ret = 0; 1699 } 1700 return ret; 1701 } 1702 1703 1704 /* this function cannot error, so unless the sending 1705 * or receiving of the message failed, the owner can 1706 * be trusted */ 1707 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, 1708 void **ret_data) 1709 { 1710 struct dlm_ctxt *dlm = data; 1711 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; 1712 struct dlm_lock_resource *res = NULL; 1713 unsigned int hash; 1714 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1715 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1716 int dispatched = 0; 1717 1718 if (!dlm_grab(dlm)) { 1719 /* since the domain has gone away on this 1720 * node, the proper response is UNKNOWN */ 1721 return master; 1722 } 1723 1724 hash = dlm_lockid_hash(req->name, req->namelen); 1725 1726 spin_lock(&dlm->spinlock); 1727 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); 1728 if (res) { 1729 spin_lock(&res->spinlock); 1730 master = res->owner; 1731 if (master == dlm->node_num) { 1732 int ret = dlm_dispatch_assert_master(dlm, res, 1733 0, 0, flags); 1734 if (ret < 0) { 1735 mlog_errno(ret); 1736 spin_unlock(&res->spinlock); 1737 dlm_lockres_put(res); 1738 spin_unlock(&dlm->spinlock); 1739 dlm_put(dlm); 1740 /* sender will take care of this and retry */ 1741 return ret; 1742 } else { 1743 dispatched = 1; 1744 __dlm_lockres_grab_inflight_worker(dlm, res); 1745 spin_unlock(&res->spinlock); 1746 } 1747 } else { 1748 /* put.. incase we are not the master */ 1749 spin_unlock(&res->spinlock); 1750 dlm_lockres_put(res); 1751 } 1752 } 1753 spin_unlock(&dlm->spinlock); 1754 1755 if (!dispatched) 1756 dlm_put(dlm); 1757 return master; 1758 } 1759 1760 static inline struct list_head * 1761 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) 1762 { 1763 struct list_head *ret; 1764 BUG_ON(list_num < 0); 1765 BUG_ON(list_num > 2); 1766 ret = &(res->granted); 1767 ret += list_num; 1768 return ret; 1769 } 1770 /* TODO: do ast flush business 1771 * TODO: do MIGRATING and RECOVERING spinning 1772 */ 1773 1774 /* 1775 * NOTE about in-flight requests during migration: 1776 * 1777 * Before attempting the migrate, the master has marked the lockres as 1778 * MIGRATING and then flushed all of its pending ASTS. So any in-flight 1779 * requests either got queued before the MIGRATING flag got set, in which 1780 * case the lock data will reflect the change and a return message is on 1781 * the way, or the request failed to get in before MIGRATING got set. In 1782 * this case, the caller will be told to spin and wait for the MIGRATING 1783 * flag to be dropped, then recheck the master. 1784 * This holds true for the convert, cancel and unlock cases, and since lvb 1785 * updates are tied to these same messages, it applies to lvb updates as 1786 * well. For the lock case, there is no way a lock can be on the master 1787 * queue and not be on the secondary queue since the lock is always added 1788 * locally first. This means that the new target node will never be sent 1789 * a lock that he doesn't already have on the list. 1790 * In total, this means that the local lock is correct and should not be 1791 * updated to match the one sent by the master. Any messages sent back 1792 * from the master before the MIGRATING flag will bring the lock properly 1793 * up-to-date, and the change will be ordered properly for the waiter. 1794 * We will *not* attempt to modify the lock underneath the waiter. 1795 */ 1796 1797 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 1798 struct dlm_lock_resource *res, 1799 struct dlm_migratable_lockres *mres) 1800 { 1801 struct dlm_migratable_lock *ml; 1802 struct list_head *queue, *iter; 1803 struct list_head *tmpq = NULL; 1804 struct dlm_lock *newlock = NULL; 1805 struct dlm_lockstatus *lksb = NULL; 1806 int ret = 0; 1807 int i, j, bad; 1808 struct dlm_lock *lock; 1809 u8 from = O2NM_MAX_NODES; 1810 unsigned int added = 0; 1811 __be64 c; 1812 1813 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1814 for (i=0; i<mres->num_locks; i++) { 1815 ml = &(mres->ml[i]); 1816 1817 if (dlm_is_dummy_lock(dlm, ml, &from)) { 1818 /* placeholder, just need to set the refmap bit */ 1819 BUG_ON(mres->num_locks != 1); 1820 mlog(0, "%s:%.*s: dummy lock for %u\n", 1821 dlm->name, mres->lockname_len, mres->lockname, 1822 from); 1823 spin_lock(&res->spinlock); 1824 dlm_lockres_set_refmap_bit(dlm, res, from); 1825 spin_unlock(&res->spinlock); 1826 added++; 1827 break; 1828 } 1829 BUG_ON(ml->highest_blocked != LKM_IVMODE); 1830 newlock = NULL; 1831 lksb = NULL; 1832 1833 queue = dlm_list_num_to_pointer(res, ml->list); 1834 tmpq = NULL; 1835 1836 /* if the lock is for the local node it needs to 1837 * be moved to the proper location within the queue. 1838 * do not allocate a new lock structure. */ 1839 if (ml->node == dlm->node_num) { 1840 /* MIGRATION ONLY! */ 1841 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); 1842 1843 lock = NULL; 1844 spin_lock(&res->spinlock); 1845 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { 1846 tmpq = dlm_list_idx_to_ptr(res, j); 1847 list_for_each(iter, tmpq) { 1848 lock = list_entry(iter, 1849 struct dlm_lock, list); 1850 if (lock->ml.cookie == ml->cookie) 1851 break; 1852 lock = NULL; 1853 } 1854 if (lock) 1855 break; 1856 } 1857 1858 /* lock is always created locally first, and 1859 * destroyed locally last. it must be on the list */ 1860 if (!lock) { 1861 c = ml->cookie; 1862 mlog(ML_ERROR, "Could not find local lock " 1863 "with cookie %u:%llu, node %u, " 1864 "list %u, flags 0x%x, type %d, " 1865 "conv %d, highest blocked %d\n", 1866 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1867 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1868 ml->node, ml->list, ml->flags, ml->type, 1869 ml->convert_type, ml->highest_blocked); 1870 __dlm_print_one_lock_resource(res); 1871 BUG(); 1872 } 1873 1874 if (lock->ml.node != ml->node) { 1875 c = lock->ml.cookie; 1876 mlog(ML_ERROR, "Mismatched node# in lock " 1877 "cookie %u:%llu, name %.*s, node %u\n", 1878 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1879 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1880 res->lockname.len, res->lockname.name, 1881 lock->ml.node); 1882 c = ml->cookie; 1883 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " 1884 "node %u, list %u, flags 0x%x, type %d, " 1885 "conv %d, highest blocked %d\n", 1886 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1887 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1888 ml->node, ml->list, ml->flags, ml->type, 1889 ml->convert_type, ml->highest_blocked); 1890 __dlm_print_one_lock_resource(res); 1891 BUG(); 1892 } 1893 1894 if (tmpq != queue) { 1895 c = ml->cookie; 1896 mlog(0, "Lock cookie %u:%llu was on list %u " 1897 "instead of list %u for %.*s\n", 1898 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1899 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1900 j, ml->list, res->lockname.len, 1901 res->lockname.name); 1902 __dlm_print_one_lock_resource(res); 1903 spin_unlock(&res->spinlock); 1904 continue; 1905 } 1906 1907 /* see NOTE above about why we do not update 1908 * to match the master here */ 1909 1910 /* move the lock to its proper place */ 1911 /* do not alter lock refcount. switching lists. */ 1912 list_move_tail(&lock->list, queue); 1913 spin_unlock(&res->spinlock); 1914 added++; 1915 1916 mlog(0, "just reordered a local lock!\n"); 1917 continue; 1918 } 1919 1920 /* lock is for another node. */ 1921 newlock = dlm_new_lock(ml->type, ml->node, 1922 be64_to_cpu(ml->cookie), NULL); 1923 if (!newlock) { 1924 ret = -ENOMEM; 1925 goto leave; 1926 } 1927 lksb = newlock->lksb; 1928 dlm_lock_attach_lockres(newlock, res); 1929 1930 if (ml->convert_type != LKM_IVMODE) { 1931 BUG_ON(queue != &res->converting); 1932 newlock->ml.convert_type = ml->convert_type; 1933 } 1934 lksb->flags |= (ml->flags & 1935 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); 1936 1937 if (ml->type == LKM_NLMODE) 1938 goto skip_lvb; 1939 1940 /* 1941 * If the lock is in the blocked list it can't have a valid lvb, 1942 * so skip it 1943 */ 1944 if (ml->list == DLM_BLOCKED_LIST) 1945 goto skip_lvb; 1946 1947 if (!dlm_lvb_is_empty(mres->lvb)) { 1948 if (lksb->flags & DLM_LKSB_PUT_LVB) { 1949 /* other node was trying to update 1950 * lvb when node died. recreate the 1951 * lksb with the updated lvb. */ 1952 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); 1953 /* the lock resource lvb update must happen 1954 * NOW, before the spinlock is dropped. 1955 * we no longer wait for the AST to update 1956 * the lvb. */ 1957 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1958 } else { 1959 /* otherwise, the node is sending its 1960 * most recent valid lvb info */ 1961 BUG_ON(ml->type != LKM_EXMODE && 1962 ml->type != LKM_PRMODE); 1963 if (!dlm_lvb_is_empty(res->lvb) && 1964 (ml->type == LKM_EXMODE || 1965 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { 1966 int i; 1967 mlog(ML_ERROR, "%s:%.*s: received bad " 1968 "lvb! type=%d\n", dlm->name, 1969 res->lockname.len, 1970 res->lockname.name, ml->type); 1971 printk("lockres lvb=["); 1972 for (i=0; i<DLM_LVB_LEN; i++) 1973 printk("%02x", res->lvb[i]); 1974 printk("]\nmigrated lvb=["); 1975 for (i=0; i<DLM_LVB_LEN; i++) 1976 printk("%02x", mres->lvb[i]); 1977 printk("]\n"); 1978 dlm_print_one_lock_resource(res); 1979 BUG(); 1980 } 1981 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1982 } 1983 } 1984 skip_lvb: 1985 1986 /* NOTE: 1987 * wrt lock queue ordering and recovery: 1988 * 1. order of locks on granted queue is 1989 * meaningless. 1990 * 2. order of locks on converting queue is 1991 * LOST with the node death. sorry charlie. 1992 * 3. order of locks on the blocked queue is 1993 * also LOST. 1994 * order of locks does not affect integrity, it 1995 * just means that a lock request may get pushed 1996 * back in line as a result of the node death. 1997 * also note that for a given node the lock order 1998 * for its secondary queue locks is preserved 1999 * relative to each other, but clearly *not* 2000 * preserved relative to locks from other nodes. 2001 */ 2002 bad = 0; 2003 spin_lock(&res->spinlock); 2004 list_for_each_entry(lock, queue, list) { 2005 if (lock->ml.cookie == ml->cookie) { 2006 c = lock->ml.cookie; 2007 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 2008 "exists on this lockres!\n", dlm->name, 2009 res->lockname.len, res->lockname.name, 2010 dlm_get_lock_cookie_node(be64_to_cpu(c)), 2011 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 2012 2013 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 2014 "node=%u, cookie=%u:%llu, queue=%d\n", 2015 ml->type, ml->convert_type, ml->node, 2016 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), 2017 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), 2018 ml->list); 2019 2020 __dlm_print_one_lock_resource(res); 2021 bad = 1; 2022 break; 2023 } 2024 } 2025 if (!bad) { 2026 dlm_lock_get(newlock); 2027 if (mres->flags & DLM_MRES_RECOVERY && 2028 ml->list == DLM_CONVERTING_LIST && 2029 newlock->ml.type > 2030 newlock->ml.convert_type) { 2031 /* newlock is doing downconvert, add it to the 2032 * head of converting list */ 2033 list_add(&newlock->list, queue); 2034 } else 2035 list_add_tail(&newlock->list, queue); 2036 mlog(0, "%s:%.*s: added lock for node %u, " 2037 "setting refmap bit\n", dlm->name, 2038 res->lockname.len, res->lockname.name, ml->node); 2039 dlm_lockres_set_refmap_bit(dlm, res, ml->node); 2040 added++; 2041 } 2042 spin_unlock(&res->spinlock); 2043 } 2044 mlog(0, "done running all the locks\n"); 2045 2046 leave: 2047 /* balance the ref taken when the work was queued */ 2048 spin_lock(&res->spinlock); 2049 dlm_lockres_drop_inflight_ref(dlm, res); 2050 spin_unlock(&res->spinlock); 2051 2052 if (ret < 0) 2053 mlog_errno(ret); 2054 2055 return ret; 2056 } 2057 2058 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, 2059 struct dlm_lock_resource *res) 2060 { 2061 int i; 2062 struct list_head *queue; 2063 struct dlm_lock *lock, *next; 2064 2065 assert_spin_locked(&dlm->spinlock); 2066 assert_spin_locked(&res->spinlock); 2067 res->state |= DLM_LOCK_RES_RECOVERING; 2068 if (!list_empty(&res->recovering)) { 2069 mlog(0, 2070 "Recovering res %s:%.*s, is already on recovery list!\n", 2071 dlm->name, res->lockname.len, res->lockname.name); 2072 list_del_init(&res->recovering); 2073 dlm_lockres_put(res); 2074 } 2075 /* We need to hold a reference while on the recovery list */ 2076 dlm_lockres_get(res); 2077 list_add_tail(&res->recovering, &dlm->reco.resources); 2078 2079 /* find any pending locks and put them back on proper list */ 2080 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { 2081 queue = dlm_list_idx_to_ptr(res, i); 2082 list_for_each_entry_safe(lock, next, queue, list) { 2083 dlm_lock_get(lock); 2084 if (lock->convert_pending) { 2085 /* move converting lock back to granted */ 2086 mlog(0, "node died with convert pending " 2087 "on %.*s. move back to granted list.\n", 2088 res->lockname.len, res->lockname.name); 2089 dlm_revert_pending_convert(res, lock); 2090 lock->convert_pending = 0; 2091 } else if (lock->lock_pending) { 2092 /* remove pending lock requests completely */ 2093 BUG_ON(i != DLM_BLOCKED_LIST); 2094 mlog(0, "node died with lock pending " 2095 "on %.*s. remove from blocked list and skip.\n", 2096 res->lockname.len, res->lockname.name); 2097 /* lock will be floating until ref in 2098 * dlmlock_remote is freed after the network 2099 * call returns. ok for it to not be on any 2100 * list since no ast can be called 2101 * (the master is dead). */ 2102 dlm_revert_pending_lock(res, lock); 2103 lock->lock_pending = 0; 2104 } else if (lock->unlock_pending) { 2105 /* if an unlock was in progress, treat as 2106 * if this had completed successfully 2107 * before sending this lock state to the 2108 * new master. note that the dlm_unlock 2109 * call is still responsible for calling 2110 * the unlockast. that will happen after 2111 * the network call times out. for now, 2112 * just move lists to prepare the new 2113 * recovery master. */ 2114 BUG_ON(i != DLM_GRANTED_LIST); 2115 mlog(0, "node died with unlock pending " 2116 "on %.*s. remove from blocked list and skip.\n", 2117 res->lockname.len, res->lockname.name); 2118 dlm_commit_pending_unlock(res, lock); 2119 lock->unlock_pending = 0; 2120 } else if (lock->cancel_pending) { 2121 /* if a cancel was in progress, treat as 2122 * if this had completed successfully 2123 * before sending this lock state to the 2124 * new master */ 2125 BUG_ON(i != DLM_CONVERTING_LIST); 2126 mlog(0, "node died with cancel pending " 2127 "on %.*s. move back to granted list.\n", 2128 res->lockname.len, res->lockname.name); 2129 dlm_commit_pending_cancel(res, lock); 2130 lock->cancel_pending = 0; 2131 } 2132 dlm_lock_put(lock); 2133 } 2134 } 2135 } 2136 2137 2138 2139 /* removes all recovered locks from the recovery list. 2140 * sets the res->owner to the new master. 2141 * unsets the RECOVERY flag and wakes waiters. */ 2142 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 2143 u8 dead_node, u8 new_master) 2144 { 2145 int i; 2146 struct hlist_head *bucket; 2147 struct dlm_lock_resource *res, *next; 2148 2149 assert_spin_locked(&dlm->spinlock); 2150 2151 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2152 if (res->owner == dead_node) { 2153 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", 2154 dlm->name, res->lockname.len, res->lockname.name, 2155 res->owner, new_master); 2156 list_del_init(&res->recovering); 2157 spin_lock(&res->spinlock); 2158 /* new_master has our reference from 2159 * the lock state sent during recovery */ 2160 dlm_change_lockres_owner(dlm, res, new_master); 2161 res->state &= ~DLM_LOCK_RES_RECOVERING; 2162 if (__dlm_lockres_has_locks(res)) 2163 __dlm_dirty_lockres(dlm, res); 2164 spin_unlock(&res->spinlock); 2165 wake_up(&res->wq); 2166 dlm_lockres_put(res); 2167 } 2168 } 2169 2170 /* this will become unnecessary eventually, but 2171 * for now we need to run the whole hash, clear 2172 * the RECOVERING state and set the owner 2173 * if necessary */ 2174 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2175 bucket = dlm_lockres_hash(dlm, i); 2176 hlist_for_each_entry(res, bucket, hash_node) { 2177 if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) { 2178 spin_lock(&res->spinlock); 2179 res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING; 2180 spin_unlock(&res->spinlock); 2181 wake_up(&res->wq); 2182 } 2183 2184 if (!(res->state & DLM_LOCK_RES_RECOVERING)) 2185 continue; 2186 2187 if (res->owner != dead_node && 2188 res->owner != dlm->node_num) 2189 continue; 2190 2191 if (!list_empty(&res->recovering)) { 2192 list_del_init(&res->recovering); 2193 dlm_lockres_put(res); 2194 } 2195 2196 /* new_master has our reference from 2197 * the lock state sent during recovery */ 2198 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", 2199 dlm->name, res->lockname.len, res->lockname.name, 2200 res->owner, new_master); 2201 spin_lock(&res->spinlock); 2202 dlm_change_lockres_owner(dlm, res, new_master); 2203 res->state &= ~DLM_LOCK_RES_RECOVERING; 2204 if (__dlm_lockres_has_locks(res)) 2205 __dlm_dirty_lockres(dlm, res); 2206 spin_unlock(&res->spinlock); 2207 wake_up(&res->wq); 2208 } 2209 } 2210 } 2211 2212 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) 2213 { 2214 if (local) { 2215 if (lock->ml.type != LKM_EXMODE && 2216 lock->ml.type != LKM_PRMODE) 2217 return 1; 2218 } else if (lock->ml.type == LKM_EXMODE) 2219 return 1; 2220 return 0; 2221 } 2222 2223 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, 2224 struct dlm_lock_resource *res, u8 dead_node) 2225 { 2226 struct list_head *queue; 2227 struct dlm_lock *lock; 2228 int blank_lvb = 0, local = 0; 2229 int i; 2230 u8 search_node; 2231 2232 assert_spin_locked(&dlm->spinlock); 2233 assert_spin_locked(&res->spinlock); 2234 2235 if (res->owner == dlm->node_num) 2236 /* if this node owned the lockres, and if the dead node 2237 * had an EX when he died, blank out the lvb */ 2238 search_node = dead_node; 2239 else { 2240 /* if this is a secondary lockres, and we had no EX or PR 2241 * locks granted, we can no longer trust the lvb */ 2242 search_node = dlm->node_num; 2243 local = 1; /* check local state for valid lvb */ 2244 } 2245 2246 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { 2247 queue = dlm_list_idx_to_ptr(res, i); 2248 list_for_each_entry(lock, queue, list) { 2249 if (lock->ml.node == search_node) { 2250 if (dlm_lvb_needs_invalidation(lock, local)) { 2251 /* zero the lksb lvb and lockres lvb */ 2252 blank_lvb = 1; 2253 memset(lock->lksb->lvb, 0, DLM_LVB_LEN); 2254 } 2255 } 2256 } 2257 } 2258 2259 if (blank_lvb) { 2260 mlog(0, "clearing %.*s lvb, dead node %u had EX\n", 2261 res->lockname.len, res->lockname.name, dead_node); 2262 memset(res->lvb, 0, DLM_LVB_LEN); 2263 } 2264 } 2265 2266 static void dlm_free_dead_locks(struct dlm_ctxt *dlm, 2267 struct dlm_lock_resource *res, u8 dead_node) 2268 { 2269 struct dlm_lock *lock, *next; 2270 unsigned int freed = 0; 2271 2272 /* this node is the lockres master: 2273 * 1) remove any stale locks for the dead node 2274 * 2) if the dead node had an EX when he died, blank out the lvb 2275 */ 2276 assert_spin_locked(&dlm->spinlock); 2277 assert_spin_locked(&res->spinlock); 2278 2279 /* We do two dlm_lock_put(). One for removing from list and the other is 2280 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ 2281 2282 /* TODO: check pending_asts, pending_basts here */ 2283 list_for_each_entry_safe(lock, next, &res->granted, list) { 2284 if (lock->ml.node == dead_node) { 2285 list_del_init(&lock->list); 2286 dlm_lock_put(lock); 2287 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2288 dlm_lock_put(lock); 2289 freed++; 2290 } 2291 } 2292 list_for_each_entry_safe(lock, next, &res->converting, list) { 2293 if (lock->ml.node == dead_node) { 2294 list_del_init(&lock->list); 2295 dlm_lock_put(lock); 2296 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2297 dlm_lock_put(lock); 2298 freed++; 2299 } 2300 } 2301 list_for_each_entry_safe(lock, next, &res->blocked, list) { 2302 if (lock->ml.node == dead_node) { 2303 list_del_init(&lock->list); 2304 dlm_lock_put(lock); 2305 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2306 dlm_lock_put(lock); 2307 freed++; 2308 } 2309 } 2310 2311 if (freed) { 2312 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2313 "dropping ref from lockres\n", dlm->name, 2314 res->lockname.len, res->lockname.name, freed, dead_node); 2315 if(!test_bit(dead_node, res->refmap)) { 2316 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " 2317 "but ref was not set\n", dlm->name, 2318 res->lockname.len, res->lockname.name, freed, dead_node); 2319 __dlm_print_one_lock_resource(res); 2320 } 2321 res->state |= DLM_LOCK_RES_RECOVERY_WAITING; 2322 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2323 } else if (test_bit(dead_node, res->refmap)) { 2324 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2325 "no locks and had not purged before dying\n", dlm->name, 2326 res->lockname.len, res->lockname.name, dead_node); 2327 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2328 } 2329 2330 /* do not kick thread yet */ 2331 __dlm_dirty_lockres(dlm, res); 2332 } 2333 2334 /* if this node is the recovery master, and there are no 2335 * locks for a given lockres owned by this node that are in 2336 * either PR or EX mode, zero out the lvb before requesting. 2337 * 2338 */ 2339 2340 2341 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2342 { 2343 struct dlm_lock_resource *res; 2344 int i; 2345 struct hlist_head *bucket; 2346 struct dlm_lock *lock; 2347 2348 2349 /* purge any stale mles */ 2350 dlm_clean_master_list(dlm, dead_node); 2351 2352 /* 2353 * now clean up all lock resources. there are two rules: 2354 * 2355 * 1) if the dead node was the master, move the lockres 2356 * to the recovering list. set the RECOVERING flag. 2357 * this lockres needs to be cleaned up before it can 2358 * be used further. 2359 * 2360 * 2) if this node was the master, remove all locks from 2361 * each of the lockres queues that were owned by the 2362 * dead node. once recovery finishes, the dlm thread 2363 * can be kicked again to see if any ASTs or BASTs 2364 * need to be fired as a result. 2365 */ 2366 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2367 bucket = dlm_lockres_hash(dlm, i); 2368 hlist_for_each_entry(res, bucket, hash_node) { 2369 /* always prune any $RECOVERY entries for dead nodes, 2370 * otherwise hangs can occur during later recovery */ 2371 if (dlm_is_recovery_lock(res->lockname.name, 2372 res->lockname.len)) { 2373 spin_lock(&res->spinlock); 2374 list_for_each_entry(lock, &res->granted, list) { 2375 if (lock->ml.node == dead_node) { 2376 mlog(0, "AHA! there was " 2377 "a $RECOVERY lock for dead " 2378 "node %u (%s)!\n", 2379 dead_node, dlm->name); 2380 list_del_init(&lock->list); 2381 dlm_lock_put(lock); 2382 /* Can't schedule 2383 * DLM_UNLOCK_FREE_LOCK 2384 * - do manually */ 2385 dlm_lock_put(lock); 2386 break; 2387 } 2388 } 2389 dlm_lockres_clear_refmap_bit(dlm, res, 2390 dead_node); 2391 spin_unlock(&res->spinlock); 2392 continue; 2393 } 2394 spin_lock(&res->spinlock); 2395 /* zero the lvb if necessary */ 2396 dlm_revalidate_lvb(dlm, res, dead_node); 2397 if (res->owner == dead_node) { 2398 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 2399 mlog(0, "%s:%.*s: owned by " 2400 "dead node %u, this node was " 2401 "dropping its ref when it died. " 2402 "continue, dropping the flag.\n", 2403 dlm->name, res->lockname.len, 2404 res->lockname.name, dead_node); 2405 } 2406 res->state &= ~DLM_LOCK_RES_DROPPING_REF; 2407 dlm_move_lockres_to_recovery_list(dlm, 2408 res); 2409 } else if (res->owner == dlm->node_num) { 2410 dlm_free_dead_locks(dlm, res, dead_node); 2411 __dlm_lockres_calc_usage(dlm, res); 2412 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 2413 if (test_bit(dead_node, res->refmap)) { 2414 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2415 "no locks and had not purged before dying\n", 2416 dlm->name, res->lockname.len, 2417 res->lockname.name, dead_node); 2418 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2419 } 2420 } 2421 spin_unlock(&res->spinlock); 2422 } 2423 } 2424 2425 } 2426 2427 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) 2428 { 2429 assert_spin_locked(&dlm->spinlock); 2430 2431 if (dlm->reco.new_master == idx) { 2432 mlog(0, "%s: recovery master %d just died\n", 2433 dlm->name, idx); 2434 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2435 /* finalize1 was reached, so it is safe to clear 2436 * the new_master and dead_node. that recovery 2437 * is complete. */ 2438 mlog(0, "%s: dead master %d had reached " 2439 "finalize1 state, clearing\n", dlm->name, idx); 2440 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2441 __dlm_reset_recovery(dlm); 2442 } 2443 } 2444 2445 /* Clean up join state on node death. */ 2446 if (dlm->joining_node == idx) { 2447 mlog(0, "Clearing join state for node %u\n", idx); 2448 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 2449 } 2450 2451 /* check to see if the node is already considered dead */ 2452 if (!test_bit(idx, dlm->live_nodes_map)) { 2453 mlog(0, "for domain %s, node %d is already dead. " 2454 "another node likely did recovery already.\n", 2455 dlm->name, idx); 2456 return; 2457 } 2458 2459 /* check to see if we do not care about this node */ 2460 if (!test_bit(idx, dlm->domain_map)) { 2461 /* This also catches the case that we get a node down 2462 * but haven't joined the domain yet. */ 2463 mlog(0, "node %u already removed from domain!\n", idx); 2464 return; 2465 } 2466 2467 clear_bit(idx, dlm->live_nodes_map); 2468 2469 /* make sure local cleanup occurs before the heartbeat events */ 2470 if (!test_bit(idx, dlm->recovery_map)) 2471 dlm_do_local_recovery_cleanup(dlm, idx); 2472 2473 /* notify anything attached to the heartbeat events */ 2474 dlm_hb_event_notify_attached(dlm, idx, 0); 2475 2476 mlog(0, "node %u being removed from domain map!\n", idx); 2477 clear_bit(idx, dlm->domain_map); 2478 clear_bit(idx, dlm->exit_domain_map); 2479 /* wake up migration waiters if a node goes down. 2480 * perhaps later we can genericize this for other waiters. */ 2481 wake_up(&dlm->migration_wq); 2482 2483 set_bit(idx, dlm->recovery_map); 2484 } 2485 2486 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) 2487 { 2488 struct dlm_ctxt *dlm = data; 2489 2490 if (!dlm_grab(dlm)) 2491 return; 2492 2493 /* 2494 * This will notify any dlm users that a node in our domain 2495 * went away without notifying us first. 2496 */ 2497 if (test_bit(idx, dlm->domain_map)) 2498 dlm_fire_domain_eviction_callbacks(dlm, idx); 2499 2500 spin_lock(&dlm->spinlock); 2501 __dlm_hb_node_down(dlm, idx); 2502 spin_unlock(&dlm->spinlock); 2503 2504 dlm_put(dlm); 2505 } 2506 2507 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) 2508 { 2509 struct dlm_ctxt *dlm = data; 2510 2511 if (!dlm_grab(dlm)) 2512 return; 2513 2514 spin_lock(&dlm->spinlock); 2515 set_bit(idx, dlm->live_nodes_map); 2516 /* do NOT notify mle attached to the heartbeat events. 2517 * new nodes are not interesting in mastery until joined. */ 2518 spin_unlock(&dlm->spinlock); 2519 2520 dlm_put(dlm); 2521 } 2522 2523 static void dlm_reco_ast(void *astdata) 2524 { 2525 struct dlm_ctxt *dlm = astdata; 2526 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", 2527 dlm->node_num, dlm->name); 2528 } 2529 static void dlm_reco_bast(void *astdata, int blocked_type) 2530 { 2531 struct dlm_ctxt *dlm = astdata; 2532 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", 2533 dlm->node_num, dlm->name); 2534 } 2535 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) 2536 { 2537 mlog(0, "unlockast for recovery lock fired!\n"); 2538 } 2539 2540 /* 2541 * dlm_pick_recovery_master will continually attempt to use 2542 * dlmlock() on the special "$RECOVERY" lockres with the 2543 * LKM_NOQUEUE flag to get an EX. every thread that enters 2544 * this function on each node racing to become the recovery 2545 * master will not stop attempting this until either: 2546 * a) this node gets the EX (and becomes the recovery master), 2547 * or b) dlm->reco.new_master gets set to some nodenum 2548 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2549 * so each time a recovery master is needed, the entire cluster 2550 * will sync at this point. if the new master dies, that will 2551 * be detected in dlm_do_recovery */ 2552 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) 2553 { 2554 enum dlm_status ret; 2555 struct dlm_lockstatus lksb; 2556 int status = -EINVAL; 2557 2558 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2559 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2560 again: 2561 memset(&lksb, 0, sizeof(lksb)); 2562 2563 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2564 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, 2565 dlm_reco_ast, dlm, dlm_reco_bast); 2566 2567 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", 2568 dlm->name, ret, lksb.status); 2569 2570 if (ret == DLM_NORMAL) { 2571 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2572 dlm->name, dlm->node_num); 2573 2574 /* got the EX lock. check to see if another node 2575 * just became the reco master */ 2576 if (dlm_reco_master_ready(dlm)) { 2577 mlog(0, "%s: got reco EX lock, but %u will " 2578 "do the recovery\n", dlm->name, 2579 dlm->reco.new_master); 2580 status = -EEXIST; 2581 } else { 2582 status = 0; 2583 2584 /* see if recovery was already finished elsewhere */ 2585 spin_lock(&dlm->spinlock); 2586 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2587 status = -EINVAL; 2588 mlog(0, "%s: got reco EX lock, but " 2589 "node got recovered already\n", dlm->name); 2590 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2591 mlog(ML_ERROR, "%s: new master is %u " 2592 "but no dead node!\n", 2593 dlm->name, dlm->reco.new_master); 2594 BUG(); 2595 } 2596 } 2597 spin_unlock(&dlm->spinlock); 2598 } 2599 2600 /* if this node has actually become the recovery master, 2601 * set the master and send the messages to begin recovery */ 2602 if (!status) { 2603 mlog(0, "%s: dead=%u, this=%u, sending " 2604 "begin_reco now\n", dlm->name, 2605 dlm->reco.dead_node, dlm->node_num); 2606 status = dlm_send_begin_reco_message(dlm, 2607 dlm->reco.dead_node); 2608 /* this always succeeds */ 2609 BUG_ON(status); 2610 2611 /* set the new_master to this node */ 2612 spin_lock(&dlm->spinlock); 2613 dlm_set_reco_master(dlm, dlm->node_num); 2614 spin_unlock(&dlm->spinlock); 2615 } 2616 2617 /* recovery lock is a special case. ast will not get fired, 2618 * so just go ahead and unlock it. */ 2619 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); 2620 if (ret == DLM_DENIED) { 2621 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); 2622 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); 2623 } 2624 if (ret != DLM_NORMAL) { 2625 /* this would really suck. this could only happen 2626 * if there was a network error during the unlock 2627 * because of node death. this means the unlock 2628 * is actually "done" and the lock structure is 2629 * even freed. we can continue, but only 2630 * because this specific lock name is special. */ 2631 mlog(ML_ERROR, "dlmunlock returned %d\n", ret); 2632 } 2633 } else if (ret == DLM_NOTQUEUED) { 2634 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2635 dlm->name, dlm->node_num); 2636 /* another node is master. wait on 2637 * reco.new_master != O2NM_INVALID_NODE_NUM 2638 * for at most one second */ 2639 wait_event_timeout(dlm->dlm_reco_thread_wq, 2640 dlm_reco_master_ready(dlm), 2641 msecs_to_jiffies(1000)); 2642 if (!dlm_reco_master_ready(dlm)) { 2643 mlog(0, "%s: reco master taking awhile\n", 2644 dlm->name); 2645 goto again; 2646 } 2647 /* another node has informed this one that it is reco master */ 2648 mlog(0, "%s: reco master %u is ready to recover %u\n", 2649 dlm->name, dlm->reco.new_master, dlm->reco.dead_node); 2650 status = -EEXIST; 2651 } else if (ret == DLM_RECOVERING) { 2652 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", 2653 dlm->name, dlm->node_num); 2654 goto again; 2655 } else { 2656 struct dlm_lock_resource *res; 2657 2658 /* dlmlock returned something other than NOTQUEUED or NORMAL */ 2659 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " 2660 "lksb.status=%s\n", dlm->name, dlm_errname(ret), 2661 dlm_errname(lksb.status)); 2662 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2663 DLM_RECOVERY_LOCK_NAME_LEN); 2664 if (res) { 2665 dlm_print_one_lock_resource(res); 2666 dlm_lockres_put(res); 2667 } else { 2668 mlog(ML_ERROR, "recovery lock not found\n"); 2669 } 2670 BUG(); 2671 } 2672 2673 return status; 2674 } 2675 2676 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) 2677 { 2678 struct dlm_begin_reco br; 2679 int ret = 0; 2680 struct dlm_node_iter iter; 2681 int nodenum; 2682 int status; 2683 2684 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2685 2686 spin_lock(&dlm->spinlock); 2687 dlm_node_iter_init(dlm->domain_map, &iter); 2688 spin_unlock(&dlm->spinlock); 2689 2690 clear_bit(dead_node, iter.node_map); 2691 2692 memset(&br, 0, sizeof(br)); 2693 br.node_idx = dlm->node_num; 2694 br.dead_node = dead_node; 2695 2696 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2697 ret = 0; 2698 if (nodenum == dead_node) { 2699 mlog(0, "not sending begin reco to dead node " 2700 "%u\n", dead_node); 2701 continue; 2702 } 2703 if (nodenum == dlm->node_num) { 2704 mlog(0, "not sending begin reco to self\n"); 2705 continue; 2706 } 2707 retry: 2708 ret = -EINVAL; 2709 mlog(0, "attempting to send begin reco msg to %d\n", 2710 nodenum); 2711 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, 2712 &br, sizeof(br), nodenum, &status); 2713 /* negative status is handled ok by caller here */ 2714 if (ret >= 0) 2715 ret = status; 2716 if (dlm_is_host_down(ret)) { 2717 /* node is down. not involved in recovery 2718 * so just keep going */ 2719 mlog(ML_NOTICE, "%s: node %u was down when sending " 2720 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2721 ret = 0; 2722 } 2723 2724 /* 2725 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, 2726 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. 2727 * We are handling both for compatibility reasons. 2728 */ 2729 if (ret == -EAGAIN || ret == EAGAIN) { 2730 mlog(0, "%s: trying to start recovery of node " 2731 "%u, but node %u is waiting for last recovery " 2732 "to complete, backoff for a bit\n", dlm->name, 2733 dead_node, nodenum); 2734 msleep(100); 2735 goto retry; 2736 } 2737 if (ret < 0) { 2738 struct dlm_lock_resource *res; 2739 2740 /* this is now a serious problem, possibly ENOMEM 2741 * in the network stack. must retry */ 2742 mlog_errno(ret); 2743 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2744 "returned %d\n", dlm->name, nodenum, ret); 2745 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2746 DLM_RECOVERY_LOCK_NAME_LEN); 2747 if (res) { 2748 dlm_print_one_lock_resource(res); 2749 dlm_lockres_put(res); 2750 } else { 2751 mlog(ML_ERROR, "recovery lock not found\n"); 2752 } 2753 /* sleep for a bit in hopes that we can avoid 2754 * another ENOMEM */ 2755 msleep(100); 2756 goto retry; 2757 } 2758 } 2759 2760 return ret; 2761 } 2762 2763 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2764 void **ret_data) 2765 { 2766 struct dlm_ctxt *dlm = data; 2767 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; 2768 2769 /* ok to return 0, domain has gone away */ 2770 if (!dlm_grab(dlm)) 2771 return 0; 2772 2773 spin_lock(&dlm->spinlock); 2774 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2775 mlog(0, "%s: node %u wants to recover node %u (%u:%u) " 2776 "but this node is in finalize state, waiting on finalize2\n", 2777 dlm->name, br->node_idx, br->dead_node, 2778 dlm->reco.dead_node, dlm->reco.new_master); 2779 spin_unlock(&dlm->spinlock); 2780 dlm_put(dlm); 2781 return -EAGAIN; 2782 } 2783 spin_unlock(&dlm->spinlock); 2784 2785 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", 2786 dlm->name, br->node_idx, br->dead_node, 2787 dlm->reco.dead_node, dlm->reco.new_master); 2788 2789 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); 2790 2791 spin_lock(&dlm->spinlock); 2792 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2793 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { 2794 mlog(0, "%s: new_master %u died, changing " 2795 "to %u\n", dlm->name, dlm->reco.new_master, 2796 br->node_idx); 2797 } else { 2798 mlog(0, "%s: new_master %u NOT DEAD, changing " 2799 "to %u\n", dlm->name, dlm->reco.new_master, 2800 br->node_idx); 2801 /* may not have seen the new master as dead yet */ 2802 } 2803 } 2804 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2805 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2806 "node %u changing it to %u\n", dlm->name, 2807 dlm->reco.dead_node, br->node_idx, br->dead_node); 2808 } 2809 dlm_set_reco_master(dlm, br->node_idx); 2810 dlm_set_reco_dead_node(dlm, br->dead_node); 2811 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2812 mlog(0, "recovery master %u sees %u as dead, but this " 2813 "node has not yet. marking %u as dead\n", 2814 br->node_idx, br->dead_node, br->dead_node); 2815 if (!test_bit(br->dead_node, dlm->domain_map) || 2816 !test_bit(br->dead_node, dlm->live_nodes_map)) 2817 mlog(0, "%u not in domain/live_nodes map " 2818 "so setting it in reco map manually\n", 2819 br->dead_node); 2820 /* force the recovery cleanup in __dlm_hb_node_down 2821 * both of these will be cleared in a moment */ 2822 set_bit(br->dead_node, dlm->domain_map); 2823 set_bit(br->dead_node, dlm->live_nodes_map); 2824 __dlm_hb_node_down(dlm, br->dead_node); 2825 } 2826 spin_unlock(&dlm->spinlock); 2827 2828 dlm_kick_recovery_thread(dlm); 2829 2830 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", 2831 dlm->name, br->node_idx, br->dead_node, 2832 dlm->reco.dead_node, dlm->reco.new_master); 2833 2834 dlm_put(dlm); 2835 return 0; 2836 } 2837 2838 #define DLM_FINALIZE_STAGE2 0x01 2839 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) 2840 { 2841 int ret = 0; 2842 struct dlm_finalize_reco fr; 2843 struct dlm_node_iter iter; 2844 int nodenum; 2845 int status; 2846 int stage = 1; 2847 2848 mlog(0, "finishing recovery for node %s:%u, " 2849 "stage %d\n", dlm->name, dlm->reco.dead_node, stage); 2850 2851 spin_lock(&dlm->spinlock); 2852 dlm_node_iter_init(dlm->domain_map, &iter); 2853 spin_unlock(&dlm->spinlock); 2854 2855 stage2: 2856 memset(&fr, 0, sizeof(fr)); 2857 fr.node_idx = dlm->node_num; 2858 fr.dead_node = dlm->reco.dead_node; 2859 if (stage == 2) 2860 fr.flags |= DLM_FINALIZE_STAGE2; 2861 2862 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2863 if (nodenum == dlm->node_num) 2864 continue; 2865 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 2866 &fr, sizeof(fr), nodenum, &status); 2867 if (ret >= 0) 2868 ret = status; 2869 if (ret < 0) { 2870 mlog(ML_ERROR, "Error %d when sending message %u (key " 2871 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, 2872 dlm->key, nodenum); 2873 if (dlm_is_host_down(ret)) { 2874 /* this has no effect on this recovery 2875 * session, so set the status to zero to 2876 * finish out the last recovery */ 2877 mlog(ML_ERROR, "node %u went down after this " 2878 "node finished recovery.\n", nodenum); 2879 ret = 0; 2880 continue; 2881 } 2882 break; 2883 } 2884 } 2885 if (stage == 1) { 2886 /* reset the node_iter back to the top and send finalize2 */ 2887 iter.curnode = -1; 2888 stage = 2; 2889 goto stage2; 2890 } 2891 2892 return ret; 2893 } 2894 2895 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2896 void **ret_data) 2897 { 2898 struct dlm_ctxt *dlm = data; 2899 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2900 int stage = 1; 2901 2902 /* ok to return 0, domain has gone away */ 2903 if (!dlm_grab(dlm)) 2904 return 0; 2905 2906 if (fr->flags & DLM_FINALIZE_STAGE2) 2907 stage = 2; 2908 2909 mlog(0, "%s: node %u finalizing recovery stage%d of " 2910 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2911 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2912 2913 spin_lock(&dlm->spinlock); 2914 2915 if (dlm->reco.new_master != fr->node_idx) { 2916 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " 2917 "%u is supposed to be the new master, dead=%u\n", 2918 fr->node_idx, dlm->reco.new_master, fr->dead_node); 2919 BUG(); 2920 } 2921 if (dlm->reco.dead_node != fr->dead_node) { 2922 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " 2923 "node %u, but node %u is supposed to be dead\n", 2924 fr->node_idx, fr->dead_node, dlm->reco.dead_node); 2925 BUG(); 2926 } 2927 2928 switch (stage) { 2929 case 1: 2930 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); 2931 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2932 mlog(ML_ERROR, "%s: received finalize1 from " 2933 "new master %u for dead node %u, but " 2934 "this node has already received it!\n", 2935 dlm->name, fr->node_idx, fr->dead_node); 2936 dlm_print_reco_node_status(dlm); 2937 BUG(); 2938 } 2939 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 2940 spin_unlock(&dlm->spinlock); 2941 break; 2942 case 2: 2943 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { 2944 mlog(ML_ERROR, "%s: received finalize2 from " 2945 "new master %u for dead node %u, but " 2946 "this node did not have finalize1!\n", 2947 dlm->name, fr->node_idx, fr->dead_node); 2948 dlm_print_reco_node_status(dlm); 2949 BUG(); 2950 } 2951 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2952 __dlm_reset_recovery(dlm); 2953 spin_unlock(&dlm->spinlock); 2954 dlm_kick_recovery_thread(dlm); 2955 break; 2956 default: 2957 BUG(); 2958 } 2959 2960 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", 2961 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); 2962 2963 dlm_put(dlm); 2964 return 0; 2965 } 2966