/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int
dlm_lockres_master_requery(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot = 0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check.
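		 * (the ref should have been taken via dlm_grab() by
		 * whichever handler queued the item, and is dropped by
		 * the dlm_put() below once the work function returns.)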
		 */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
		case DLM_RECO_NODE_DATA_INIT:
			st = "init";
			break;
		case DLM_RECO_NODE_DATA_REQUESTING:
			st = "requesting";
			break;
		case DLM_RECO_NODE_DATA_DEAD:
			st = "dead";
			break;
		case DLM_RECO_NODE_DATA_RECEIVING:
			st = "receiving";
			break;
		case DLM_RECO_NODE_DATA_REQUESTED:
			st = "requested";
			break;
		case DLM_RECO_NODE_DATA_DONE:
			st = "done";
			break;
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			st = "finalize-sent";
			break;
		default:
			st = "bad";
			break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if the node has been recovered from,
 * i.e. it is no longer set in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep.
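		 * we will be woken again by dlm_kick_recovery_thread()
		 * or, at the latest, by the DLM_RECO_THREAD_TIMEOUT_MS
		 * timeout in dlm_recovery_thread().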
		 */
		return 0;
	}
	mlog(0, "%s(%d): recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
		case DLM_RECO_NODE_DATA_INIT:
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
		case DLM_RECO_NODE_DATA_REQUESTED:
			BUG();
			break;
		case DLM_RECO_NODE_DATA_DEAD:
			mlog(0, "node %u died after requesting "
			     "recovery info for node %u\n",
			     ndata->node_num, dead_node);
			/* fine.  don't need this node's info.
			 * continue without it. */
			break;
		case DLM_RECO_NODE_DATA_REQUESTING:
			ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
			mlog(0, "now receiving recovery data from "
			     "node %u for dead node %u\n",
			     ndata->node_num, dead_node);
			break;
		case DLM_RECO_NODE_DATA_RECEIVING:
			mlog(0, "already receiving recovery data from "
			     "node %u for dead node %u\n",
			     ndata->node_num, dead_node);
			break;
		case DLM_RECO_NODE_DATA_DONE:
			mlog(0, "already DONE receiving recovery data "
			     "from node %u for dead node %u\n",
			     ndata->node_num, dead_node);
			break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(ML_ERROR, "bad ndata state for "
				     "node %u: state=%d\n",
				     ndata->node_num, ndata->state);
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after "
				     "requesting recovery info for "
				     "node %u\n", ndata->node_num,
				     dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
				mlog(0, "%s: node %u still in state %s\n",
				     dlm->name, ndata->node_num,
				     ndata->state == DLM_RECO_NODE_DATA_RECEIVING ?
				     "receiving" : "requested");
				all_nodes_done = 0;
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "%s: node %u state is done\n",
				     dlm->name, ndata->node_num);
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(0, "%s: node %u state is finalize\n",
				     dlm->name, ndata->node_num);
				break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done ? "yes" : "no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! "
			     "send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "\n");

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d sending LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;

}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node !=
	    dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session.
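	 * ($RECOVERY lockres entries held by the dead node are pruned
	 * inside dlm_move_reco_locks_to_list() rather than being sent.)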
	 */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d sending RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
		/* should have moved beyond INIT but not to FINALIZE yet */
		case DLM_RECO_NODE_DATA_INIT:
		case DLM_RECO_NODE_DATA_DEAD:
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			mlog(ML_ERROR, "bad ndata state for node %u:"
			     " state=%d\n", ndata->node_num,
			     ndata->state);
			BUG();
			break;
		/* these states are possible at this point, anywhere along
		 * the line of recovery */
		case DLM_RECO_NODE_DATA_DONE:
		case DLM_RECO_NODE_DATA_RECEIVING:
		case DLM_RECO_NODE_DATA_REQUESTED:
		case DLM_RECO_NODE_DATA_REQUESTING:
			mlog(0, "node %u is DONE sending "
			     "recovery data!\n",
			     ndata->node_num);

			ndata->state = DLM_RECO_NODE_DATA_DONE;
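			/* found the reporting node; a zero ret means the
			 * recovery thread gets kicked below */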
			ret = 0;
			break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i = 0; i < 3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node.
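		 * (a send failure lands here; callers appear to treat
		 * anything other than a host-down error as fatal, see
		 * the error path in dlm_send_one_lockres().)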
		 */
		mlog(ML_ERROR, "%s: res %.*s, Error %d sending MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
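	/* the zero cookie, IVMODE fields and blocked list below are
	 * exactly what dlm_is_dummy_lock() matches on the remote end */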
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
		     ml->type == LKM_IVMODE &&
		     ml->convert_type == LKM_IVMODE &&
		     ml->highest_blocked == LKM_IVMODE &&
		     ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?
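 * (as the code stands, a failed alloc in the handler below simply
 * returns -ENOMEM, leaving any retry to the sender.)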
 * returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init.
		 * The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	    (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}



static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue.
			 */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}


int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
		     dlm->key, nodenum);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}


/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		} else /* put.. in case we are not the master */
			dlm_lockres_put(res);
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}

/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue, *iter;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i = 0; i < mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				/* do not use list_for_each_entry() here:
				 * NULLing the cursor mid-walk would oops
				 * on the next iteration */
				list_for_each(iter, tmpq) {
					lock = list_entry(iter,
						  struct dlm_lock, list);
					if (lock->ml.cookie == ml->cookie)
						break;
					lock = NULL;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.
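			 * presumably, if the master still listed it, the
			 * local copy cannot have been torn down yet: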
it must be on the list */ 1809 if (!lock) { 1810 c = ml->cookie; 1811 mlog(ML_ERROR, "Could not find local lock " 1812 "with cookie %u:%llu, node %u, " 1813 "list %u, flags 0x%x, type %d, " 1814 "conv %d, highest blocked %d\n", 1815 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1816 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1817 ml->node, ml->list, ml->flags, ml->type, 1818 ml->convert_type, ml->highest_blocked); 1819 __dlm_print_one_lock_resource(res); 1820 BUG(); 1821 } 1822 1823 if (lock->ml.node != ml->node) { 1824 c = lock->ml.cookie; 1825 mlog(ML_ERROR, "Mismatched node# in lock " 1826 "cookie %u:%llu, name %.*s, node %u\n", 1827 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1828 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1829 res->lockname.len, res->lockname.name, 1830 lock->ml.node); 1831 c = ml->cookie; 1832 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " 1833 "node %u, list %u, flags 0x%x, type %d, " 1834 "conv %d, highest blocked %d\n", 1835 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1836 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1837 ml->node, ml->list, ml->flags, ml->type, 1838 ml->convert_type, ml->highest_blocked); 1839 __dlm_print_one_lock_resource(res); 1840 BUG(); 1841 } 1842 1843 if (tmpq != queue) { 1844 c = ml->cookie; 1845 mlog(0, "Lock cookie %u:%llu was on list %u " 1846 "instead of list %u for %.*s\n", 1847 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1848 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1849 j, ml->list, res->lockname.len, 1850 res->lockname.name); 1851 __dlm_print_one_lock_resource(res); 1852 spin_unlock(&res->spinlock); 1853 continue; 1854 } 1855 1856 /* see NOTE above about why we do not update 1857 * to match the master here */ 1858 1859 /* move the lock to its proper place */ 1860 /* do not alter lock refcount. switching lists. */ 1861 list_move_tail(&lock->list, queue); 1862 spin_unlock(&res->spinlock); 1863 added++; 1864 1865 mlog(0, "just reordered a local lock!\n"); 1866 continue; 1867 } 1868 1869 /* lock is for another node. */ 1870 newlock = dlm_new_lock(ml->type, ml->node, 1871 be64_to_cpu(ml->cookie), NULL); 1872 if (!newlock) { 1873 ret = -ENOMEM; 1874 goto leave; 1875 } 1876 lksb = newlock->lksb; 1877 dlm_lock_attach_lockres(newlock, res); 1878 1879 if (ml->convert_type != LKM_IVMODE) { 1880 BUG_ON(queue != &res->converting); 1881 newlock->ml.convert_type = ml->convert_type; 1882 } 1883 lksb->flags |= (ml->flags & 1884 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); 1885 1886 if (ml->type == LKM_NLMODE) 1887 goto skip_lvb; 1888 1889 /* 1890 * If the lock is in the blocked list it can't have a valid lvb, 1891 * so skip it 1892 */ 1893 if (ml->list == DLM_BLOCKED_LIST) 1894 goto skip_lvb; 1895 1896 if (!dlm_lvb_is_empty(mres->lvb)) { 1897 if (lksb->flags & DLM_LKSB_PUT_LVB) { 1898 /* other node was trying to update 1899 * lvb when node died. recreate the 1900 * lksb with the updated lvb. */ 1901 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); 1902 /* the lock resource lvb update must happen 1903 * NOW, before the spinlock is dropped. 1904 * we no longer wait for the AST to update 1905 * the lvb. */ 1906 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1907 } else { 1908 /* otherwise, the node is sending its 1909 * most recent valid lvb info */ 1910 BUG_ON(ml->type != LKM_EXMODE && 1911 ml->type != LKM_PRMODE); 1912 if (!dlm_lvb_is_empty(res->lvb) && 1913 (ml->type == LKM_EXMODE || 1914 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { 1915 int i; 1916 mlog(ML_ERROR, "%s:%.*s: received bad " 1917 "lvb! 
type=%d\n", dlm->name, 1918 res->lockname.len, 1919 res->lockname.name, ml->type); 1920 printk("lockres lvb=["); 1921 for (i=0; i<DLM_LVB_LEN; i++) 1922 printk("%02x", res->lvb[i]); 1923 printk("]\nmigrated lvb=["); 1924 for (i=0; i<DLM_LVB_LEN; i++) 1925 printk("%02x", mres->lvb[i]); 1926 printk("]\n"); 1927 dlm_print_one_lock_resource(res); 1928 BUG(); 1929 } 1930 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1931 } 1932 } 1933 skip_lvb: 1934 1935 /* NOTE: 1936 * wrt lock queue ordering and recovery: 1937 * 1. order of locks on granted queue is 1938 * meaningless. 1939 * 2. order of locks on converting queue is 1940 * LOST with the node death. sorry charlie. 1941 * 3. order of locks on the blocked queue is 1942 * also LOST. 1943 * order of locks does not affect integrity, it 1944 * just means that a lock request may get pushed 1945 * back in line as a result of the node death. 1946 * also note that for a given node the lock order 1947 * for its secondary queue locks is preserved 1948 * relative to each other, but clearly *not* 1949 * preserved relative to locks from other nodes. 1950 */ 1951 bad = 0; 1952 spin_lock(&res->spinlock); 1953 list_for_each_entry(lock, queue, list) { 1954 if (lock->ml.cookie == ml->cookie) { 1955 c = lock->ml.cookie; 1956 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1957 "exists on this lockres!\n", dlm->name, 1958 res->lockname.len, res->lockname.name, 1959 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1960 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 1961 1962 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 1963 "node=%u, cookie=%u:%llu, queue=%d\n", 1964 ml->type, ml->convert_type, ml->node, 1965 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), 1966 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), 1967 ml->list); 1968 1969 __dlm_print_one_lock_resource(res); 1970 bad = 1; 1971 break; 1972 } 1973 } 1974 if (!bad) { 1975 dlm_lock_get(newlock); 1976 list_add_tail(&newlock->list, queue); 1977 mlog(0, "%s:%.*s: added lock for node %u, " 1978 "setting refmap bit\n", dlm->name, 1979 res->lockname.len, res->lockname.name, ml->node); 1980 dlm_lockres_set_refmap_bit(dlm, res, ml->node); 1981 added++; 1982 } 1983 spin_unlock(&res->spinlock); 1984 } 1985 mlog(0, "done running all the locks\n"); 1986 1987 leave: 1988 /* balance the ref taken when the work was queued */ 1989 spin_lock(&res->spinlock); 1990 dlm_lockres_drop_inflight_ref(dlm, res); 1991 spin_unlock(&res->spinlock); 1992 1993 if (ret < 0) { 1994 mlog_errno(ret); 1995 if (newlock) 1996 dlm_lock_put(newlock); 1997 } 1998 1999 return ret; 2000 } 2001 2002 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, 2003 struct dlm_lock_resource *res) 2004 { 2005 int i; 2006 struct list_head *queue; 2007 struct dlm_lock *lock, *next; 2008 2009 assert_spin_locked(&dlm->spinlock); 2010 assert_spin_locked(&res->spinlock); 2011 res->state |= DLM_LOCK_RES_RECOVERING; 2012 if (!list_empty(&res->recovering)) { 2013 mlog(0, 2014 "Recovering res %s:%.*s, is already on recovery list!\n", 2015 dlm->name, res->lockname.len, res->lockname.name); 2016 list_del_init(&res->recovering); 2017 dlm_lockres_put(res); 2018 } 2019 /* We need to hold a reference while on the recovery list */ 2020 dlm_lockres_get(res); 2021 list_add_tail(&res->recovering, &dlm->reco.resources); 2022 2023 /* find any pending locks and put them back on proper list */ 2024 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { 2025 queue = dlm_list_idx_to_ptr(res, i); 2026 list_for_each_entry_safe(lock, next, queue, list) { 2027 dlm_lock_get(lock); 2028 if 
(lock->convert_pending) {
2029 				/* move converting lock back to granted */
2030 				BUG_ON(i != DLM_CONVERTING_LIST);
2031 				mlog(0, "node died with convert pending "
2032 				     "on %.*s. move back to granted list.\n",
2033 				     res->lockname.len, res->lockname.name);
2034 				dlm_revert_pending_convert(res, lock);
2035 				lock->convert_pending = 0;
2036 			} else if (lock->lock_pending) {
2037 				/* remove pending lock requests completely */
2038 				BUG_ON(i != DLM_BLOCKED_LIST);
2039 				mlog(0, "node died with lock pending "
2040 				     "on %.*s. remove from blocked list and skip.\n",
2041 				     res->lockname.len, res->lockname.name);
2042 				/* lock will be floating until ref in
2043 				 * dlmlock_remote is freed after the network
2044 				 * call returns. ok for it to not be on any
2045 				 * list since no ast can be called
2046 				 * (the master is dead). */
2047 				dlm_revert_pending_lock(res, lock);
2048 				lock->lock_pending = 0;
2049 			} else if (lock->unlock_pending) {
2050 				/* if an unlock was in progress, treat as
2051 				 * if this had completed successfully
2052 				 * before sending this lock state to the
2053 				 * new master. note that the dlm_unlock
2054 				 * call is still responsible for calling
2055 				 * the unlockast. that will happen after
2056 				 * the network call times out. for now,
2057 				 * just move lists to prepare the new
2058 				 * recovery master. */
2059 				BUG_ON(i != DLM_GRANTED_LIST);
2060 				mlog(0, "node died with unlock pending "
2061 				     "on %.*s. commit the unlock as if it had completed.\n",
2062 				     res->lockname.len, res->lockname.name);
2063 				dlm_commit_pending_unlock(res, lock);
2064 				lock->unlock_pending = 0;
2065 			} else if (lock->cancel_pending) {
2066 				/* if a cancel was in progress, treat as
2067 				 * if this had completed successfully
2068 				 * before sending this lock state to the
2069 				 * new master */
2070 				BUG_ON(i != DLM_CONVERTING_LIST);
2071 				mlog(0, "node died with cancel pending "
2072 				     "on %.*s. move back to granted list.\n",
2073 				     res->lockname.len, res->lockname.name);
2074 				dlm_commit_pending_cancel(res, lock);
2075 				lock->cancel_pending = 0;
2076 			}
2077 			dlm_lock_put(lock);
2078 		}
2079 	}
2080 }
2081 
2082 
2083 
2084 /* removes all recovered locks from the recovery list.
2085  * sets the res->owner to the new master.
2086  * unsets the RECOVERING flag and wakes waiters.
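 * note: this runs in two passes below.  the first walks the explicit
 * reco.resources list; the second scans the whole hash to catch any
 * lockres still marked RECOVERING that never made it onto that list.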
*/ 2087 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 2088 u8 dead_node, u8 new_master) 2089 { 2090 int i; 2091 struct hlist_head *bucket; 2092 struct dlm_lock_resource *res, *next; 2093 2094 assert_spin_locked(&dlm->spinlock); 2095 2096 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2097 if (res->owner == dead_node) { 2098 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", 2099 dlm->name, res->lockname.len, res->lockname.name, 2100 res->owner, new_master); 2101 list_del_init(&res->recovering); 2102 spin_lock(&res->spinlock); 2103 /* new_master has our reference from 2104 * the lock state sent during recovery */ 2105 dlm_change_lockres_owner(dlm, res, new_master); 2106 res->state &= ~DLM_LOCK_RES_RECOVERING; 2107 if (__dlm_lockres_has_locks(res)) 2108 __dlm_dirty_lockres(dlm, res); 2109 spin_unlock(&res->spinlock); 2110 wake_up(&res->wq); 2111 dlm_lockres_put(res); 2112 } 2113 } 2114 2115 /* this will become unnecessary eventually, but 2116 * for now we need to run the whole hash, clear 2117 * the RECOVERING state and set the owner 2118 * if necessary */ 2119 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2120 bucket = dlm_lockres_hash(dlm, i); 2121 hlist_for_each_entry(res, bucket, hash_node) { 2122 if (!(res->state & DLM_LOCK_RES_RECOVERING)) 2123 continue; 2124 2125 if (res->owner != dead_node && 2126 res->owner != dlm->node_num) 2127 continue; 2128 2129 if (!list_empty(&res->recovering)) { 2130 list_del_init(&res->recovering); 2131 dlm_lockres_put(res); 2132 } 2133 2134 /* new_master has our reference from 2135 * the lock state sent during recovery */ 2136 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", 2137 dlm->name, res->lockname.len, res->lockname.name, 2138 res->owner, new_master); 2139 spin_lock(&res->spinlock); 2140 dlm_change_lockres_owner(dlm, res, new_master); 2141 res->state &= ~DLM_LOCK_RES_RECOVERING; 2142 if (__dlm_lockres_has_locks(res)) 2143 __dlm_dirty_lockres(dlm, res); 2144 spin_unlock(&res->spinlock); 2145 wake_up(&res->wq); 2146 } 2147 } 2148 } 2149 2150 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) 2151 { 2152 if (local) { 2153 if (lock->ml.type != LKM_EXMODE && 2154 lock->ml.type != LKM_PRMODE) 2155 return 1; 2156 } else if (lock->ml.type == LKM_EXMODE) 2157 return 1; 2158 return 0; 2159 } 2160 2161 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, 2162 struct dlm_lock_resource *res, u8 dead_node) 2163 { 2164 struct list_head *queue; 2165 struct dlm_lock *lock; 2166 int blank_lvb = 0, local = 0; 2167 int i; 2168 u8 search_node; 2169 2170 assert_spin_locked(&dlm->spinlock); 2171 assert_spin_locked(&res->spinlock); 2172 2173 if (res->owner == dlm->node_num) 2174 /* if this node owned the lockres, and if the dead node 2175 * had an EX when he died, blank out the lvb */ 2176 search_node = dead_node; 2177 else { 2178 /* if this is a secondary lockres, and we had no EX or PR 2179 * locks granted, we can no longer trust the lvb */ 2180 search_node = dlm->node_num; 2181 local = 1; /* check local state for valid lvb */ 2182 } 2183 2184 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { 2185 queue = dlm_list_idx_to_ptr(res, i); 2186 list_for_each_entry(lock, queue, list) { 2187 if (lock->ml.node == search_node) { 2188 if (dlm_lvb_needs_invalidation(lock, local)) { 2189 /* zero the lksb lvb and lockres lvb */ 2190 blank_lvb = 1; 2191 memset(lock->lksb->lvb, 0, DLM_LVB_LEN); 2192 } 2193 } 2194 } 2195 } 2196 2197 if (blank_lvb) { 2198 mlog(0, "clearing %.*s lvb, dead node %u had 
EX\n", 2199 res->lockname.len, res->lockname.name, dead_node); 2200 memset(res->lvb, 0, DLM_LVB_LEN); 2201 } 2202 } 2203 2204 static void dlm_free_dead_locks(struct dlm_ctxt *dlm, 2205 struct dlm_lock_resource *res, u8 dead_node) 2206 { 2207 struct dlm_lock *lock, *next; 2208 unsigned int freed = 0; 2209 2210 /* this node is the lockres master: 2211 * 1) remove any stale locks for the dead node 2212 * 2) if the dead node had an EX when he died, blank out the lvb 2213 */ 2214 assert_spin_locked(&dlm->spinlock); 2215 assert_spin_locked(&res->spinlock); 2216 2217 /* We do two dlm_lock_put(). One for removing from list and the other is 2218 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ 2219 2220 /* TODO: check pending_asts, pending_basts here */ 2221 list_for_each_entry_safe(lock, next, &res->granted, list) { 2222 if (lock->ml.node == dead_node) { 2223 list_del_init(&lock->list); 2224 dlm_lock_put(lock); 2225 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2226 dlm_lock_put(lock); 2227 freed++; 2228 } 2229 } 2230 list_for_each_entry_safe(lock, next, &res->converting, list) { 2231 if (lock->ml.node == dead_node) { 2232 list_del_init(&lock->list); 2233 dlm_lock_put(lock); 2234 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2235 dlm_lock_put(lock); 2236 freed++; 2237 } 2238 } 2239 list_for_each_entry_safe(lock, next, &res->blocked, list) { 2240 if (lock->ml.node == dead_node) { 2241 list_del_init(&lock->list); 2242 dlm_lock_put(lock); 2243 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2244 dlm_lock_put(lock); 2245 freed++; 2246 } 2247 } 2248 2249 if (freed) { 2250 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2251 "dropping ref from lockres\n", dlm->name, 2252 res->lockname.len, res->lockname.name, freed, dead_node); 2253 if(!test_bit(dead_node, res->refmap)) { 2254 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " 2255 "but ref was not set\n", dlm->name, 2256 res->lockname.len, res->lockname.name, freed, dead_node); 2257 __dlm_print_one_lock_resource(res); 2258 } 2259 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2260 } else if (test_bit(dead_node, res->refmap)) { 2261 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2262 "no locks and had not purged before dying\n", dlm->name, 2263 res->lockname.len, res->lockname.name, dead_node); 2264 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2265 } 2266 2267 /* do not kick thread yet */ 2268 __dlm_dirty_lockres(dlm, res); 2269 } 2270 2271 /* if this node is the recovery master, and there are no 2272 * locks for a given lockres owned by this node that are in 2273 * either PR or EX mode, zero out the lvb before requesting. 2274 * 2275 */ 2276 2277 2278 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2279 { 2280 struct dlm_lock_resource *res; 2281 int i; 2282 struct hlist_head *bucket; 2283 struct dlm_lock *lock; 2284 2285 2286 /* purge any stale mles */ 2287 dlm_clean_master_list(dlm, dead_node); 2288 2289 /* 2290 * now clean up all lock resources. there are two rules: 2291 * 2292 * 1) if the dead node was the master, move the lockres 2293 * to the recovering list. set the RECOVERING flag. 2294 * this lockres needs to be cleaned up before it can 2295 * be used further. 2296 * 2297 * 2) if this node was the master, remove all locks from 2298 * each of the lockres queues that were owned by the 2299 * dead node. once recovery finishes, the dlm thread 2300 * can be kicked again to see if any ASTs or BASTs 2301 * need to be fired as a result. 
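 * in addition, any granted $RECOVERY lock held by the dead node is
 * pruned up front, since a stale $RECOVERY entry can hang a later
 * recovery.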
2302 */ 2303 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2304 bucket = dlm_lockres_hash(dlm, i); 2305 hlist_for_each_entry(res, bucket, hash_node) { 2306 /* always prune any $RECOVERY entries for dead nodes, 2307 * otherwise hangs can occur during later recovery */ 2308 if (dlm_is_recovery_lock(res->lockname.name, 2309 res->lockname.len)) { 2310 spin_lock(&res->spinlock); 2311 list_for_each_entry(lock, &res->granted, list) { 2312 if (lock->ml.node == dead_node) { 2313 mlog(0, "AHA! there was " 2314 "a $RECOVERY lock for dead " 2315 "node %u (%s)!\n", 2316 dead_node, dlm->name); 2317 list_del_init(&lock->list); 2318 dlm_lock_put(lock); 2319 break; 2320 } 2321 } 2322 spin_unlock(&res->spinlock); 2323 continue; 2324 } 2325 spin_lock(&res->spinlock); 2326 /* zero the lvb if necessary */ 2327 dlm_revalidate_lvb(dlm, res, dead_node); 2328 if (res->owner == dead_node) { 2329 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 2330 mlog(ML_NOTICE, "%s: res %.*s, Skip " 2331 "recovery as it is being freed\n", 2332 dlm->name, res->lockname.len, 2333 res->lockname.name); 2334 } else 2335 dlm_move_lockres_to_recovery_list(dlm, 2336 res); 2337 2338 } else if (res->owner == dlm->node_num) { 2339 dlm_free_dead_locks(dlm, res, dead_node); 2340 __dlm_lockres_calc_usage(dlm, res); 2341 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 2342 if (test_bit(dead_node, res->refmap)) { 2343 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2344 "no locks and had not purged before dying\n", 2345 dlm->name, res->lockname.len, 2346 res->lockname.name, dead_node); 2347 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2348 } 2349 } 2350 spin_unlock(&res->spinlock); 2351 } 2352 } 2353 2354 } 2355 2356 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) 2357 { 2358 assert_spin_locked(&dlm->spinlock); 2359 2360 if (dlm->reco.new_master == idx) { 2361 mlog(0, "%s: recovery master %d just died\n", 2362 dlm->name, idx); 2363 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2364 /* finalize1 was reached, so it is safe to clear 2365 * the new_master and dead_node. that recovery 2366 * is complete. */ 2367 mlog(0, "%s: dead master %d had reached " 2368 "finalize1 state, clearing\n", dlm->name, idx); 2369 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2370 __dlm_reset_recovery(dlm); 2371 } 2372 } 2373 2374 /* Clean up join state on node death. */ 2375 if (dlm->joining_node == idx) { 2376 mlog(0, "Clearing join state for node %u\n", idx); 2377 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 2378 } 2379 2380 /* check to see if the node is already considered dead */ 2381 if (!test_bit(idx, dlm->live_nodes_map)) { 2382 mlog(0, "for domain %s, node %d is already dead. " 2383 "another node likely did recovery already.\n", 2384 dlm->name, idx); 2385 return; 2386 } 2387 2388 /* check to see if we do not care about this node */ 2389 if (!test_bit(idx, dlm->domain_map)) { 2390 /* This also catches the case that we get a node down 2391 * but haven't joined the domain yet. 
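 * (a node only shows up in domain_map once its join has completed.)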
 */
2392 		mlog(0, "node %u already removed from domain!\n", idx);
2393 		return;
2394 	}
2395 
2396 	clear_bit(idx, dlm->live_nodes_map);
2397 
2398 	/* make sure local cleanup occurs before the heartbeat events */
2399 	if (!test_bit(idx, dlm->recovery_map))
2400 		dlm_do_local_recovery_cleanup(dlm, idx);
2401 
2402 	/* notify anything attached to the heartbeat events */
2403 	dlm_hb_event_notify_attached(dlm, idx, 0);
2404 
2405 	mlog(0, "node %u being removed from domain map!\n", idx);
2406 	clear_bit(idx, dlm->domain_map);
2407 	clear_bit(idx, dlm->exit_domain_map);
2408 	/* wake up migration waiters if a node goes down.
2409 	 * perhaps later we can genericize this for other waiters. */
2410 	wake_up(&dlm->migration_wq);
2411 
2412 	if (test_bit(idx, dlm->recovery_map))
2413 		mlog(0, "domain %s, node %u already added "
2414 		     "to recovery map!\n", dlm->name, idx);
2415 	else
2416 		set_bit(idx, dlm->recovery_map);
2417 }
2418 
2419 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2420 {
2421 	struct dlm_ctxt *dlm = data;
2422 
2423 	if (!dlm_grab(dlm))
2424 		return;
2425 
2426 	/*
2427 	 * This will notify any dlm users that a node in our domain
2428 	 * went away without notifying us first.
2429 	 */
2430 	if (test_bit(idx, dlm->domain_map))
2431 		dlm_fire_domain_eviction_callbacks(dlm, idx);
2432 
2433 	spin_lock(&dlm->spinlock);
2434 	__dlm_hb_node_down(dlm, idx);
2435 	spin_unlock(&dlm->spinlock);
2436 
2437 	dlm_put(dlm);
2438 }
2439 
2440 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2441 {
2442 	struct dlm_ctxt *dlm = data;
2443 
2444 	if (!dlm_grab(dlm))
2445 		return;
2446 
2447 	spin_lock(&dlm->spinlock);
2448 	set_bit(idx, dlm->live_nodes_map);
2449 	/* do NOT notify mle attached to the heartbeat events.
2450 	 * new nodes play no part in mastery until they have joined. */
2451 	spin_unlock(&dlm->spinlock);
2452 
2453 	dlm_put(dlm);
2454 }
2455 
2456 static void dlm_reco_ast(void *astdata)
2457 {
2458 	struct dlm_ctxt *dlm = astdata;
2459 	mlog(0, "ast for recovery lock fired! this=%u, dlm=%s\n",
2460 	     dlm->node_num, dlm->name);
2461 }
2462 static void dlm_reco_bast(void *astdata, int blocked_type)
2463 {
2464 	struct dlm_ctxt *dlm = astdata;
2465 	mlog(0, "bast for recovery lock fired! this=%u, dlm=%s\n",
2466 	     dlm->node_num, dlm->name);
2467 }
2468 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2469 {
2470 	mlog(0, "unlockast for recovery lock fired!\n");
2471 }
2472 
2473 /*
2474  * dlm_pick_recovery_master will continually attempt to use
2475  * dlmlock() on the special "$RECOVERY" lockres with the
2476  * LKM_NOQUEUE flag to get an EX. every thread that enters
2477  * this function on each node racing to become the recovery
2478  * master will not stop attempting this until either:
2479  * a) this node gets the EX (and becomes the recovery master),
2480  * or b) dlm->reco.new_master gets set to some nodenum
2481  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2482  * so each time a recovery master is needed, the entire cluster
2483  * will sync at this point.
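 * schematically, each racing node loops over something like this
 * (illustrative only):
 *
 *     dlmlock($RECOVERY, EX, LKM_NOQUEUE|LKM_RECOVERY)
 *         DLM_NORMAL     -> we won; send begin_reco to all nodes
 *         DLM_NOTQUEUED  -> another node won; wait for new_master
 *         DLM_RECOVERING -> the lock master died mid-race; retry
 *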
if the new master dies, that will 2484 * be detected in dlm_do_recovery */ 2485 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) 2486 { 2487 enum dlm_status ret; 2488 struct dlm_lockstatus lksb; 2489 int status = -EINVAL; 2490 2491 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2492 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2493 again: 2494 memset(&lksb, 0, sizeof(lksb)); 2495 2496 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2497 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, 2498 dlm_reco_ast, dlm, dlm_reco_bast); 2499 2500 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", 2501 dlm->name, ret, lksb.status); 2502 2503 if (ret == DLM_NORMAL) { 2504 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2505 dlm->name, dlm->node_num); 2506 2507 /* got the EX lock. check to see if another node 2508 * just became the reco master */ 2509 if (dlm_reco_master_ready(dlm)) { 2510 mlog(0, "%s: got reco EX lock, but %u will " 2511 "do the recovery\n", dlm->name, 2512 dlm->reco.new_master); 2513 status = -EEXIST; 2514 } else { 2515 status = 0; 2516 2517 /* see if recovery was already finished elsewhere */ 2518 spin_lock(&dlm->spinlock); 2519 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2520 status = -EINVAL; 2521 mlog(0, "%s: got reco EX lock, but " 2522 "node got recovered already\n", dlm->name); 2523 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2524 mlog(ML_ERROR, "%s: new master is %u " 2525 "but no dead node!\n", 2526 dlm->name, dlm->reco.new_master); 2527 BUG(); 2528 } 2529 } 2530 spin_unlock(&dlm->spinlock); 2531 } 2532 2533 /* if this node has actually become the recovery master, 2534 * set the master and send the messages to begin recovery */ 2535 if (!status) { 2536 mlog(0, "%s: dead=%u, this=%u, sending " 2537 "begin_reco now\n", dlm->name, 2538 dlm->reco.dead_node, dlm->node_num); 2539 status = dlm_send_begin_reco_message(dlm, 2540 dlm->reco.dead_node); 2541 /* this always succeeds */ 2542 BUG_ON(status); 2543 2544 /* set the new_master to this node */ 2545 spin_lock(&dlm->spinlock); 2546 dlm_set_reco_master(dlm, dlm->node_num); 2547 spin_unlock(&dlm->spinlock); 2548 } 2549 2550 /* recovery lock is a special case. ast will not get fired, 2551 * so just go ahead and unlock it. */ 2552 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); 2553 if (ret == DLM_DENIED) { 2554 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); 2555 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); 2556 } 2557 if (ret != DLM_NORMAL) { 2558 /* this would really suck. this could only happen 2559 * if there was a network error during the unlock 2560 * because of node death. this means the unlock 2561 * is actually "done" and the lock structure is 2562 * even freed. we can continue, but only 2563 * because this specific lock name is special. */ 2564 mlog(ML_ERROR, "dlmunlock returned %d\n", ret); 2565 } 2566 } else if (ret == DLM_NOTQUEUED) { 2567 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2568 dlm->name, dlm->node_num); 2569 /* another node is master. 
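 * (our LKM_NOQUEUE request came back DLM_NOTQUEUED rather than blocking.)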
wait on 2570 * reco.new_master != O2NM_INVALID_NODE_NUM 2571 * for at most one second */ 2572 wait_event_timeout(dlm->dlm_reco_thread_wq, 2573 dlm_reco_master_ready(dlm), 2574 msecs_to_jiffies(1000)); 2575 if (!dlm_reco_master_ready(dlm)) { 2576 mlog(0, "%s: reco master taking awhile\n", 2577 dlm->name); 2578 goto again; 2579 } 2580 /* another node has informed this one that it is reco master */ 2581 mlog(0, "%s: reco master %u is ready to recover %u\n", 2582 dlm->name, dlm->reco.new_master, dlm->reco.dead_node); 2583 status = -EEXIST; 2584 } else if (ret == DLM_RECOVERING) { 2585 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", 2586 dlm->name, dlm->node_num); 2587 goto again; 2588 } else { 2589 struct dlm_lock_resource *res; 2590 2591 /* dlmlock returned something other than NOTQUEUED or NORMAL */ 2592 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " 2593 "lksb.status=%s\n", dlm->name, dlm_errname(ret), 2594 dlm_errname(lksb.status)); 2595 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2596 DLM_RECOVERY_LOCK_NAME_LEN); 2597 if (res) { 2598 dlm_print_one_lock_resource(res); 2599 dlm_lockres_put(res); 2600 } else { 2601 mlog(ML_ERROR, "recovery lock not found\n"); 2602 } 2603 BUG(); 2604 } 2605 2606 return status; 2607 } 2608 2609 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) 2610 { 2611 struct dlm_begin_reco br; 2612 int ret = 0; 2613 struct dlm_node_iter iter; 2614 int nodenum; 2615 int status; 2616 2617 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2618 2619 spin_lock(&dlm->spinlock); 2620 dlm_node_iter_init(dlm->domain_map, &iter); 2621 spin_unlock(&dlm->spinlock); 2622 2623 clear_bit(dead_node, iter.node_map); 2624 2625 memset(&br, 0, sizeof(br)); 2626 br.node_idx = dlm->node_num; 2627 br.dead_node = dead_node; 2628 2629 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2630 ret = 0; 2631 if (nodenum == dead_node) { 2632 mlog(0, "not sending begin reco to dead node " 2633 "%u\n", dead_node); 2634 continue; 2635 } 2636 if (nodenum == dlm->node_num) { 2637 mlog(0, "not sending begin reco to self\n"); 2638 continue; 2639 } 2640 retry: 2641 ret = -EINVAL; 2642 mlog(0, "attempting to send begin reco msg to %d\n", 2643 nodenum); 2644 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, 2645 &br, sizeof(br), nodenum, &status); 2646 /* negative status is handled ok by caller here */ 2647 if (ret >= 0) 2648 ret = status; 2649 if (dlm_is_host_down(ret)) { 2650 /* node is down. not involved in recovery 2651 * so just keep going */ 2652 mlog(ML_NOTICE, "%s: node %u was down when sending " 2653 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2654 ret = 0; 2655 } 2656 2657 /* 2658 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, 2659 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. 2660 * We are handling both for compatibility reasons. 2661 */ 2662 if (ret == -EAGAIN || ret == EAGAIN) { 2663 mlog(0, "%s: trying to start recovery of node " 2664 "%u, but node %u is waiting for last recovery " 2665 "to complete, backoff for a bit\n", dlm->name, 2666 dead_node, nodenum); 2667 msleep(100); 2668 goto retry; 2669 } 2670 if (ret < 0) { 2671 struct dlm_lock_resource *res; 2672 2673 /* this is now a serious problem, possibly ENOMEM 2674 * in the network stack. 
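 * (a dead peer would already have been caught by the dlm_is_host_down
 * and EAGAIN checks above, so anything else is unexpected.)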
must retry */ 2675 mlog_errno(ret); 2676 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2677 "returned %d\n", dlm->name, nodenum, ret); 2678 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2679 DLM_RECOVERY_LOCK_NAME_LEN); 2680 if (res) { 2681 dlm_print_one_lock_resource(res); 2682 dlm_lockres_put(res); 2683 } else { 2684 mlog(ML_ERROR, "recovery lock not found\n"); 2685 } 2686 /* sleep for a bit in hopes that we can avoid 2687 * another ENOMEM */ 2688 msleep(100); 2689 goto retry; 2690 } 2691 } 2692 2693 return ret; 2694 } 2695 2696 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2697 void **ret_data) 2698 { 2699 struct dlm_ctxt *dlm = data; 2700 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; 2701 2702 /* ok to return 0, domain has gone away */ 2703 if (!dlm_grab(dlm)) 2704 return 0; 2705 2706 spin_lock(&dlm->spinlock); 2707 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2708 mlog(0, "%s: node %u wants to recover node %u (%u:%u) " 2709 "but this node is in finalize state, waiting on finalize2\n", 2710 dlm->name, br->node_idx, br->dead_node, 2711 dlm->reco.dead_node, dlm->reco.new_master); 2712 spin_unlock(&dlm->spinlock); 2713 dlm_put(dlm); 2714 return -EAGAIN; 2715 } 2716 spin_unlock(&dlm->spinlock); 2717 2718 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", 2719 dlm->name, br->node_idx, br->dead_node, 2720 dlm->reco.dead_node, dlm->reco.new_master); 2721 2722 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); 2723 2724 spin_lock(&dlm->spinlock); 2725 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2726 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { 2727 mlog(0, "%s: new_master %u died, changing " 2728 "to %u\n", dlm->name, dlm->reco.new_master, 2729 br->node_idx); 2730 } else { 2731 mlog(0, "%s: new_master %u NOT DEAD, changing " 2732 "to %u\n", dlm->name, dlm->reco.new_master, 2733 br->node_idx); 2734 /* may not have seen the new master as dead yet */ 2735 } 2736 } 2737 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2738 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2739 "node %u changing it to %u\n", dlm->name, 2740 dlm->reco.dead_node, br->node_idx, br->dead_node); 2741 } 2742 dlm_set_reco_master(dlm, br->node_idx); 2743 dlm_set_reco_dead_node(dlm, br->dead_node); 2744 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2745 mlog(0, "recovery master %u sees %u as dead, but this " 2746 "node has not yet. 
marking %u as dead\n", 2747 br->node_idx, br->dead_node, br->dead_node); 2748 if (!test_bit(br->dead_node, dlm->domain_map) || 2749 !test_bit(br->dead_node, dlm->live_nodes_map)) 2750 mlog(0, "%u not in domain/live_nodes map " 2751 "so setting it in reco map manually\n", 2752 br->dead_node); 2753 /* force the recovery cleanup in __dlm_hb_node_down 2754 * both of these will be cleared in a moment */ 2755 set_bit(br->dead_node, dlm->domain_map); 2756 set_bit(br->dead_node, dlm->live_nodes_map); 2757 __dlm_hb_node_down(dlm, br->dead_node); 2758 } 2759 spin_unlock(&dlm->spinlock); 2760 2761 dlm_kick_recovery_thread(dlm); 2762 2763 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", 2764 dlm->name, br->node_idx, br->dead_node, 2765 dlm->reco.dead_node, dlm->reco.new_master); 2766 2767 dlm_put(dlm); 2768 return 0; 2769 } 2770 2771 #define DLM_FINALIZE_STAGE2 0x01 2772 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) 2773 { 2774 int ret = 0; 2775 struct dlm_finalize_reco fr; 2776 struct dlm_node_iter iter; 2777 int nodenum; 2778 int status; 2779 int stage = 1; 2780 2781 mlog(0, "finishing recovery for node %s:%u, " 2782 "stage %d\n", dlm->name, dlm->reco.dead_node, stage); 2783 2784 spin_lock(&dlm->spinlock); 2785 dlm_node_iter_init(dlm->domain_map, &iter); 2786 spin_unlock(&dlm->spinlock); 2787 2788 stage2: 2789 memset(&fr, 0, sizeof(fr)); 2790 fr.node_idx = dlm->node_num; 2791 fr.dead_node = dlm->reco.dead_node; 2792 if (stage == 2) 2793 fr.flags |= DLM_FINALIZE_STAGE2; 2794 2795 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2796 if (nodenum == dlm->node_num) 2797 continue; 2798 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 2799 &fr, sizeof(fr), nodenum, &status); 2800 if (ret >= 0) 2801 ret = status; 2802 if (ret < 0) { 2803 mlog(ML_ERROR, "Error %d when sending message %u (key " 2804 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, 2805 dlm->key, nodenum); 2806 if (dlm_is_host_down(ret)) { 2807 /* this has no effect on this recovery 2808 * session, so set the status to zero to 2809 * finish out the last recovery */ 2810 mlog(ML_ERROR, "node %u went down after this " 2811 "node finished recovery.\n", nodenum); 2812 ret = 0; 2813 continue; 2814 } 2815 break; 2816 } 2817 } 2818 if (stage == 1) { 2819 /* reset the node_iter back to the top and send finalize2 */ 2820 iter.curnode = -1; 2821 stage = 2; 2822 goto stage2; 2823 } 2824 2825 return ret; 2826 } 2827 2828 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2829 void **ret_data) 2830 { 2831 struct dlm_ctxt *dlm = data; 2832 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2833 int stage = 1; 2834 2835 /* ok to return 0, domain has gone away */ 2836 if (!dlm_grab(dlm)) 2837 return 0; 2838 2839 if (fr->flags & DLM_FINALIZE_STAGE2) 2840 stage = 2; 2841 2842 mlog(0, "%s: node %u finalizing recovery stage%d of " 2843 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2844 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2845 2846 spin_lock(&dlm->spinlock); 2847 2848 if (dlm->reco.new_master != fr->node_idx) { 2849 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " 2850 "%u is supposed to be the new master, dead=%u\n", 2851 fr->node_idx, dlm->reco.new_master, fr->dead_node); 2852 BUG(); 2853 } 2854 if (dlm->reco.dead_node != fr->dead_node) { 2855 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " 2856 "node %u, but node %u is supposed to be dead\n", 2857 fr->node_idx, fr->dead_node, dlm->reco.dead_node); 2858 BUG(); 2859 } 
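	/* two-stage finalize: stage 1 installs the new owner on every
	 * recovered lockres and latches DLM_RECO_STATE_FINALIZE; stage 2
	 * clears that flag and resets the recovery state.  a repeated
	 * stage, or stage 2 without stage 1, is a fatal protocol error. */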
2860 2861 switch (stage) { 2862 case 1: 2863 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); 2864 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2865 mlog(ML_ERROR, "%s: received finalize1 from " 2866 "new master %u for dead node %u, but " 2867 "this node has already received it!\n", 2868 dlm->name, fr->node_idx, fr->dead_node); 2869 dlm_print_reco_node_status(dlm); 2870 BUG(); 2871 } 2872 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 2873 spin_unlock(&dlm->spinlock); 2874 break; 2875 case 2: 2876 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { 2877 mlog(ML_ERROR, "%s: received finalize2 from " 2878 "new master %u for dead node %u, but " 2879 "this node did not have finalize1!\n", 2880 dlm->name, fr->node_idx, fr->dead_node); 2881 dlm_print_reco_node_status(dlm); 2882 BUG(); 2883 } 2884 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2885 spin_unlock(&dlm->spinlock); 2886 dlm_reset_recovery(dlm); 2887 dlm_kick_recovery_thread(dlm); 2888 break; 2889 default: 2890 BUG(); 2891 } 2892 2893 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", 2894 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); 2895 2896 dlm_put(dlm); 2897 return 0; 2898 } 2899