1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmrecovery.c 5 * 6 * recovery stuff 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/init.h> 34 #include <linux/sysctl.h> 35 #include <linux/random.h> 36 #include <linux/blkdev.h> 37 #include <linux/socket.h> 38 #include <linux/inet.h> 39 #include <linux/timer.h> 40 #include <linux/kthread.h> 41 #include <linux/delay.h> 42 43 44 #include "cluster/heartbeat.h" 45 #include "cluster/nodemanager.h" 46 #include "cluster/tcp.h" 47 48 #include "dlmapi.h" 49 #include "dlmcommon.h" 50 #include "dlmdomain.h" 51 52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) 53 #include "cluster/masklog.h" 54 55 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); 56 57 static int dlm_recovery_thread(void *data); 58 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); 59 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); 60 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); 61 static int dlm_do_recovery(struct dlm_ctxt *dlm); 62 63 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); 64 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); 65 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 66 static int dlm_request_all_locks(struct dlm_ctxt *dlm, 67 u8 request_from, u8 dead_node); 68 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 69 70 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); 71 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 72 const char *lockname, int namelen, 73 int total_locks, u64 cookie, 74 u8 flags, u8 master); 75 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 76 struct dlm_migratable_lockres *mres, 77 u8 send_to, 78 struct dlm_lock_resource *res, 79 int total_locks); 80 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 81 struct dlm_lock_resource *res, 82 struct dlm_migratable_lockres *mres); 83 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); 84 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, 85 u8 dead_node, u8 send_to); 86 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); 87 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 88 struct list_head *list, u8 dead_node); 89 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 90 u8 dead_node, u8 new_master); 91 static void dlm_reco_ast(void *astdata); 92 static void dlm_reco_bast(void *astdata, int blocked_type); 93 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); 94 static void dlm_request_all_locks_worker(struct dlm_work_item *item, 95 void *data); 96 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); 97 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 98 struct dlm_lock_resource *res, 99 u8 *real_master); 100 101 static u64 dlm_get_next_mig_cookie(void); 102 103 static DEFINE_SPINLOCK(dlm_reco_state_lock); 104 static DEFINE_SPINLOCK(dlm_mig_cookie_lock); 105 static u64 dlm_mig_cookie = 1; 106 107 static u64 dlm_get_next_mig_cookie(void) 108 { 109 u64 c; 110 spin_lock(&dlm_mig_cookie_lock); 111 c = dlm_mig_cookie; 112 if (dlm_mig_cookie == (~0ULL)) 113 dlm_mig_cookie = 1; 114 else 115 dlm_mig_cookie++; 116 spin_unlock(&dlm_mig_cookie_lock); 117 return c; 118 } 119 120 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, 121 u8 dead_node) 122 { 123 assert_spin_locked(&dlm->spinlock); 124 if (dlm->reco.dead_node != dead_node) 125 mlog(0, "%s: changing dead_node from %u to %u\n", 126 dlm->name, dlm->reco.dead_node, dead_node); 127 dlm->reco.dead_node = dead_node; 128 } 129 130 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, 131 u8 master) 132 { 133 assert_spin_locked(&dlm->spinlock); 134 mlog(0, "%s: changing new_master from %u to %u\n", 135 dlm->name, dlm->reco.new_master, master); 136 dlm->reco.new_master = master; 137 } 138 139 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) 140 { 141 assert_spin_locked(&dlm->spinlock); 142 clear_bit(dlm->reco.dead_node, dlm->recovery_map); 143 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 144 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 145 } 146 147 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) 148 { 149 spin_lock(&dlm->spinlock); 150 __dlm_reset_recovery(dlm); 151 spin_unlock(&dlm->spinlock); 152 } 153 154 /* Worker function used during recovery. */ 155 void dlm_dispatch_work(struct work_struct *work) 156 { 157 struct dlm_ctxt *dlm = 158 container_of(work, struct dlm_ctxt, dispatched_work); 159 LIST_HEAD(tmp_list); 160 struct dlm_work_item *item, *next; 161 dlm_workfunc_t *workfunc; 162 int tot=0; 163 164 spin_lock(&dlm->work_lock); 165 list_splice_init(&dlm->work_list, &tmp_list); 166 spin_unlock(&dlm->work_lock); 167 168 list_for_each_entry(item, &tmp_list, list) { 169 tot++; 170 } 171 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); 172 173 list_for_each_entry_safe(item, next, &tmp_list, list) { 174 workfunc = item->func; 175 list_del_init(&item->list); 176 177 /* already have ref on dlm to avoid having 178 * it disappear. just double-check. */ 179 BUG_ON(item->dlm != dlm); 180 181 /* this is allowed to sleep and 182 * call network stuff */ 183 workfunc(item, item->data); 184 185 dlm_put(dlm); 186 kfree(item); 187 } 188 } 189 190 /* 191 * RECOVERY THREAD 192 */ 193 194 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) 195 { 196 /* wake the recovery thread 197 * this will wake the reco thread in one of three places 198 * 1) sleeping with no recovery happening 199 * 2) sleeping with recovery mastered elsewhere 200 * 3) recovery mastered here, waiting on reco data */ 201 202 wake_up(&dlm->dlm_reco_thread_wq); 203 } 204 205 /* Launch the recovery thread */ 206 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) 207 { 208 mlog(0, "starting dlm recovery thread...\n"); 209 210 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, 211 "dlm_reco_thread"); 212 if (IS_ERR(dlm->dlm_reco_thread_task)) { 213 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); 214 dlm->dlm_reco_thread_task = NULL; 215 return -EINVAL; 216 } 217 218 return 0; 219 } 220 221 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) 222 { 223 if (dlm->dlm_reco_thread_task) { 224 mlog(0, "waiting for dlm recovery thread to exit\n"); 225 kthread_stop(dlm->dlm_reco_thread_task); 226 dlm->dlm_reco_thread_task = NULL; 227 } 228 } 229 230 231 232 /* 233 * this is lame, but here's how recovery works... 234 * 1) all recovery threads cluster wide will work on recovering 235 * ONE node at a time 236 * 2) negotiate who will take over all the locks for the dead node. 237 * thats right... ALL the locks. 238 * 3) once a new master is chosen, everyone scans all locks 239 * and moves aside those mastered by the dead guy 240 * 4) each of these locks should be locked until recovery is done 241 * 5) the new master collects up all of secondary lock queue info 242 * one lock at a time, forcing each node to communicate back 243 * before continuing 244 * 6) each secondary lock queue responds with the full known lock info 245 * 7) once the new master has run all its locks, it sends a ALLDONE! 246 * message to everyone 247 * 8) upon receiving this message, the secondary queue node unlocks 248 * and responds to the ALLDONE 249 * 9) once the new master gets responses from everyone, he unlocks 250 * everything and recovery for this dead node is done 251 *10) go back to 2) while there are still dead nodes 252 * 253 */ 254 255 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) 256 { 257 struct dlm_reco_node_data *ndata; 258 struct dlm_lock_resource *res; 259 260 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", 261 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 262 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", 263 dlm->reco.dead_node, dlm->reco.new_master); 264 265 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 266 char *st = "unknown"; 267 switch (ndata->state) { 268 case DLM_RECO_NODE_DATA_INIT: 269 st = "init"; 270 break; 271 case DLM_RECO_NODE_DATA_REQUESTING: 272 st = "requesting"; 273 break; 274 case DLM_RECO_NODE_DATA_DEAD: 275 st = "dead"; 276 break; 277 case DLM_RECO_NODE_DATA_RECEIVING: 278 st = "receiving"; 279 break; 280 case DLM_RECO_NODE_DATA_REQUESTED: 281 st = "requested"; 282 break; 283 case DLM_RECO_NODE_DATA_DONE: 284 st = "done"; 285 break; 286 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 287 st = "finalize-sent"; 288 break; 289 default: 290 st = "bad"; 291 break; 292 } 293 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", 294 dlm->name, ndata->node_num, st); 295 } 296 list_for_each_entry(res, &dlm->reco.resources, recovering) { 297 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", 298 dlm->name, res->lockname.len, res->lockname.name); 299 } 300 } 301 302 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) 303 304 static int dlm_recovery_thread(void *data) 305 { 306 int status; 307 struct dlm_ctxt *dlm = data; 308 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); 309 310 mlog(0, "dlm thread running for %s...\n", dlm->name); 311 312 while (!kthread_should_stop()) { 313 if (dlm_joined(dlm)) { 314 status = dlm_do_recovery(dlm); 315 if (status == -EAGAIN) { 316 /* do not sleep, recheck immediately. */ 317 continue; 318 } 319 if (status < 0) 320 mlog_errno(status); 321 } 322 323 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 324 kthread_should_stop(), 325 timeout); 326 } 327 328 mlog(0, "quitting DLM recovery thread\n"); 329 return 0; 330 } 331 332 /* returns true when the recovery master has contacted us */ 333 static int dlm_reco_master_ready(struct dlm_ctxt *dlm) 334 { 335 int ready; 336 spin_lock(&dlm->spinlock); 337 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); 338 spin_unlock(&dlm->spinlock); 339 return ready; 340 } 341 342 /* returns true if node is no longer in the domain 343 * could be dead or just not joined */ 344 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) 345 { 346 int dead; 347 spin_lock(&dlm->spinlock); 348 dead = !test_bit(node, dlm->domain_map); 349 spin_unlock(&dlm->spinlock); 350 return dead; 351 } 352 353 /* returns true if node is no longer in the domain 354 * could be dead or just not joined */ 355 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) 356 { 357 int recovered; 358 spin_lock(&dlm->spinlock); 359 recovered = !test_bit(node, dlm->recovery_map); 360 spin_unlock(&dlm->spinlock); 361 return recovered; 362 } 363 364 365 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 366 { 367 if (timeout) { 368 mlog(ML_NOTICE, "%s: waiting %dms for notification of " 369 "death of node %u\n", dlm->name, timeout, node); 370 wait_event_timeout(dlm->dlm_reco_thread_wq, 371 dlm_is_node_dead(dlm, node), 372 msecs_to_jiffies(timeout)); 373 } else { 374 mlog(ML_NOTICE, "%s: waiting indefinitely for notification " 375 "of death of node %u\n", dlm->name, node); 376 wait_event(dlm->dlm_reco_thread_wq, 377 dlm_is_node_dead(dlm, node)); 378 } 379 /* for now, return 0 */ 380 return 0; 381 } 382 383 int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 384 { 385 if (timeout) { 386 mlog(0, "%s: waiting %dms for notification of " 387 "recovery of node %u\n", dlm->name, timeout, node); 388 wait_event_timeout(dlm->dlm_reco_thread_wq, 389 dlm_is_node_recovered(dlm, node), 390 msecs_to_jiffies(timeout)); 391 } else { 392 mlog(0, "%s: waiting indefinitely for notification " 393 "of recovery of node %u\n", dlm->name, node); 394 wait_event(dlm->dlm_reco_thread_wq, 395 dlm_is_node_recovered(dlm, node)); 396 } 397 /* for now, return 0 */ 398 return 0; 399 } 400 401 /* callers of the top-level api calls (dlmlock/dlmunlock) should 402 * block on the dlm->reco.event when recovery is in progress. 403 * the dlm recovery thread will set this state when it begins 404 * recovering a dead node (as the new master or not) and clear 405 * the state and wake as soon as all affected lock resources have 406 * been marked with the RECOVERY flag */ 407 static int dlm_in_recovery(struct dlm_ctxt *dlm) 408 { 409 int in_recovery; 410 spin_lock(&dlm->spinlock); 411 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 412 spin_unlock(&dlm->spinlock); 413 return in_recovery; 414 } 415 416 417 void dlm_wait_for_recovery(struct dlm_ctxt *dlm) 418 { 419 if (dlm_in_recovery(dlm)) { 420 mlog(0, "%s: reco thread %d in recovery: " 421 "state=%d, master=%u, dead=%u\n", 422 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 423 dlm->reco.state, dlm->reco.new_master, 424 dlm->reco.dead_node); 425 } 426 wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); 427 } 428 429 static void dlm_begin_recovery(struct dlm_ctxt *dlm) 430 { 431 spin_lock(&dlm->spinlock); 432 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 433 dlm->reco.state |= DLM_RECO_STATE_ACTIVE; 434 spin_unlock(&dlm->spinlock); 435 } 436 437 static void dlm_end_recovery(struct dlm_ctxt *dlm) 438 { 439 spin_lock(&dlm->spinlock); 440 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); 441 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; 442 spin_unlock(&dlm->spinlock); 443 wake_up(&dlm->reco.event); 444 } 445 446 static int dlm_do_recovery(struct dlm_ctxt *dlm) 447 { 448 int status = 0; 449 int ret; 450 451 spin_lock(&dlm->spinlock); 452 453 /* check to see if the new master has died */ 454 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && 455 test_bit(dlm->reco.new_master, dlm->recovery_map)) { 456 mlog(0, "new master %u died while recovering %u!\n", 457 dlm->reco.new_master, dlm->reco.dead_node); 458 /* unset the new_master, leave dead_node */ 459 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 460 } 461 462 /* select a target to recover */ 463 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 464 int bit; 465 466 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0); 467 if (bit >= O2NM_MAX_NODES || bit < 0) 468 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 469 else 470 dlm_set_reco_dead_node(dlm, bit); 471 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { 472 /* BUG? */ 473 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", 474 dlm->reco.dead_node); 475 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 476 } 477 478 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 479 // mlog(0, "nothing to recover! sleeping now!\n"); 480 spin_unlock(&dlm->spinlock); 481 /* return to main thread loop and sleep. */ 482 return 0; 483 } 484 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", 485 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 486 dlm->reco.dead_node); 487 spin_unlock(&dlm->spinlock); 488 489 /* take write barrier */ 490 /* (stops the list reshuffling thread, proxy ast handling) */ 491 dlm_begin_recovery(dlm); 492 493 if (dlm->reco.new_master == dlm->node_num) 494 goto master_here; 495 496 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 497 /* choose a new master, returns 0 if this node 498 * is the master, -EEXIST if it's another node. 499 * this does not return until a new master is chosen 500 * or recovery completes entirely. */ 501 ret = dlm_pick_recovery_master(dlm); 502 if (!ret) { 503 /* already notified everyone. go. */ 504 goto master_here; 505 } 506 mlog(0, "another node will master this recovery session.\n"); 507 } 508 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", 509 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, 510 dlm->node_num, dlm->reco.dead_node); 511 512 /* it is safe to start everything back up here 513 * because all of the dead node's lock resources 514 * have been marked as in-recovery */ 515 dlm_end_recovery(dlm); 516 517 /* sleep out in main dlm_recovery_thread loop. */ 518 return 0; 519 520 master_here: 521 mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " 522 "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task), 523 dlm->node_num, dlm->reco.dead_node, dlm->name); 524 525 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 526 if (status < 0) { 527 /* we should never hit this anymore */ 528 mlog(ML_ERROR, "error %d remastering locks for node %u, " 529 "retrying.\n", status, dlm->reco.dead_node); 530 /* yield a bit to allow any final network messages 531 * to get handled on remaining nodes */ 532 msleep(100); 533 } else { 534 /* success! see if any other nodes need recovery */ 535 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", 536 dlm->name, dlm->reco.dead_node, dlm->node_num); 537 dlm_reset_recovery(dlm); 538 } 539 dlm_end_recovery(dlm); 540 541 /* continue and look for another dead node */ 542 return -EAGAIN; 543 } 544 545 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) 546 { 547 int status = 0; 548 struct dlm_reco_node_data *ndata; 549 int all_nodes_done; 550 int destroy = 0; 551 int pass = 0; 552 553 do { 554 /* we have become recovery master. there is no escaping 555 * this, so just keep trying until we get it. */ 556 status = dlm_init_recovery_area(dlm, dead_node); 557 if (status < 0) { 558 mlog(ML_ERROR, "%s: failed to alloc recovery area, " 559 "retrying\n", dlm->name); 560 msleep(1000); 561 } 562 } while (status != 0); 563 564 /* safe to access the node data list without a lock, since this 565 * process is the only one to change the list */ 566 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 567 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); 568 ndata->state = DLM_RECO_NODE_DATA_REQUESTING; 569 570 mlog(0, "requesting lock info from node %u\n", 571 ndata->node_num); 572 573 if (ndata->node_num == dlm->node_num) { 574 ndata->state = DLM_RECO_NODE_DATA_DONE; 575 continue; 576 } 577 578 do { 579 status = dlm_request_all_locks(dlm, ndata->node_num, 580 dead_node); 581 if (status < 0) { 582 mlog_errno(status); 583 if (dlm_is_host_down(status)) { 584 /* node died, ignore it for recovery */ 585 status = 0; 586 ndata->state = DLM_RECO_NODE_DATA_DEAD; 587 /* wait for the domain map to catch up 588 * with the network state. */ 589 wait_event_timeout(dlm->dlm_reco_thread_wq, 590 dlm_is_node_dead(dlm, 591 ndata->node_num), 592 msecs_to_jiffies(1000)); 593 mlog(0, "waited 1 sec for %u, " 594 "dead? %s\n", ndata->node_num, 595 dlm_is_node_dead(dlm, ndata->node_num) ? 596 "yes" : "no"); 597 } else { 598 /* -ENOMEM on the other node */ 599 mlog(0, "%s: node %u returned " 600 "%d during recovery, retrying " 601 "after a short wait\n", 602 dlm->name, ndata->node_num, 603 status); 604 msleep(100); 605 } 606 } 607 } while (status != 0); 608 609 spin_lock(&dlm_reco_state_lock); 610 switch (ndata->state) { 611 case DLM_RECO_NODE_DATA_INIT: 612 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 613 case DLM_RECO_NODE_DATA_REQUESTED: 614 BUG(); 615 break; 616 case DLM_RECO_NODE_DATA_DEAD: 617 mlog(0, "node %u died after requesting " 618 "recovery info for node %u\n", 619 ndata->node_num, dead_node); 620 /* fine. don't need this node's info. 621 * continue without it. */ 622 break; 623 case DLM_RECO_NODE_DATA_REQUESTING: 624 ndata->state = DLM_RECO_NODE_DATA_REQUESTED; 625 mlog(0, "now receiving recovery data from " 626 "node %u for dead node %u\n", 627 ndata->node_num, dead_node); 628 break; 629 case DLM_RECO_NODE_DATA_RECEIVING: 630 mlog(0, "already receiving recovery data from " 631 "node %u for dead node %u\n", 632 ndata->node_num, dead_node); 633 break; 634 case DLM_RECO_NODE_DATA_DONE: 635 mlog(0, "already DONE receiving recovery data " 636 "from node %u for dead node %u\n", 637 ndata->node_num, dead_node); 638 break; 639 } 640 spin_unlock(&dlm_reco_state_lock); 641 } 642 643 mlog(0, "done requesting all lock info\n"); 644 645 /* nodes should be sending reco data now 646 * just need to wait */ 647 648 while (1) { 649 /* check all the nodes now to see if we are 650 * done, or if anyone died */ 651 all_nodes_done = 1; 652 spin_lock(&dlm_reco_state_lock); 653 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 654 mlog(0, "checking recovery state of node %u\n", 655 ndata->node_num); 656 switch (ndata->state) { 657 case DLM_RECO_NODE_DATA_INIT: 658 case DLM_RECO_NODE_DATA_REQUESTING: 659 mlog(ML_ERROR, "bad ndata state for " 660 "node %u: state=%d\n", 661 ndata->node_num, ndata->state); 662 BUG(); 663 break; 664 case DLM_RECO_NODE_DATA_DEAD: 665 mlog(0, "node %u died after " 666 "requesting recovery info for " 667 "node %u\n", ndata->node_num, 668 dead_node); 669 break; 670 case DLM_RECO_NODE_DATA_RECEIVING: 671 case DLM_RECO_NODE_DATA_REQUESTED: 672 mlog(0, "%s: node %u still in state %s\n", 673 dlm->name, ndata->node_num, 674 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? 675 "receiving" : "requested"); 676 all_nodes_done = 0; 677 break; 678 case DLM_RECO_NODE_DATA_DONE: 679 mlog(0, "%s: node %u state is done\n", 680 dlm->name, ndata->node_num); 681 break; 682 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 683 mlog(0, "%s: node %u state is finalize\n", 684 dlm->name, ndata->node_num); 685 break; 686 } 687 } 688 spin_unlock(&dlm_reco_state_lock); 689 690 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, 691 all_nodes_done?"yes":"no"); 692 if (all_nodes_done) { 693 int ret; 694 695 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state 696 * just send a finalize message to everyone and 697 * clean up */ 698 mlog(0, "all nodes are done! send finalize\n"); 699 ret = dlm_send_finalize_reco_message(dlm); 700 if (ret < 0) 701 mlog_errno(ret); 702 703 spin_lock(&dlm->spinlock); 704 dlm_finish_local_lockres_recovery(dlm, dead_node, 705 dlm->node_num); 706 spin_unlock(&dlm->spinlock); 707 mlog(0, "should be done with recovery!\n"); 708 709 mlog(0, "finishing recovery of %s at %lu, " 710 "dead=%u, this=%u, new=%u\n", dlm->name, 711 jiffies, dlm->reco.dead_node, 712 dlm->node_num, dlm->reco.new_master); 713 destroy = 1; 714 status = 0; 715 /* rescan everything marked dirty along the way */ 716 dlm_kick_thread(dlm, NULL); 717 break; 718 } 719 /* wait to be signalled, with periodic timeout 720 * to check for node death */ 721 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 722 kthread_should_stop(), 723 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); 724 725 } 726 727 if (destroy) 728 dlm_destroy_recovery_area(dlm, dead_node); 729 730 mlog_exit(status); 731 return status; 732 } 733 734 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 735 { 736 int num=0; 737 struct dlm_reco_node_data *ndata; 738 739 spin_lock(&dlm->spinlock); 740 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); 741 /* nodes can only be removed (by dying) after dropping 742 * this lock, and death will be trapped later, so this should do */ 743 spin_unlock(&dlm->spinlock); 744 745 while (1) { 746 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); 747 if (num >= O2NM_MAX_NODES) { 748 break; 749 } 750 BUG_ON(num == dead_node); 751 752 ndata = kzalloc(sizeof(*ndata), GFP_NOFS); 753 if (!ndata) { 754 dlm_destroy_recovery_area(dlm, dead_node); 755 return -ENOMEM; 756 } 757 ndata->node_num = num; 758 ndata->state = DLM_RECO_NODE_DATA_INIT; 759 spin_lock(&dlm_reco_state_lock); 760 list_add_tail(&ndata->list, &dlm->reco.node_data); 761 spin_unlock(&dlm_reco_state_lock); 762 num++; 763 } 764 765 return 0; 766 } 767 768 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 769 { 770 struct dlm_reco_node_data *ndata, *next; 771 LIST_HEAD(tmplist); 772 773 spin_lock(&dlm_reco_state_lock); 774 list_splice_init(&dlm->reco.node_data, &tmplist); 775 spin_unlock(&dlm_reco_state_lock); 776 777 list_for_each_entry_safe(ndata, next, &tmplist, list) { 778 list_del_init(&ndata->list); 779 kfree(ndata); 780 } 781 } 782 783 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, 784 u8 dead_node) 785 { 786 struct dlm_lock_request lr; 787 enum dlm_status ret; 788 789 mlog(0, "\n"); 790 791 792 mlog(0, "dlm_request_all_locks: dead node is %u, sending request " 793 "to %u\n", dead_node, request_from); 794 795 memset(&lr, 0, sizeof(lr)); 796 lr.node_idx = dlm->node_num; 797 lr.dead_node = dead_node; 798 799 // send message 800 ret = DLM_NOLOCKMGR; 801 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, 802 &lr, sizeof(lr), request_from, NULL); 803 804 /* negative status is handled by caller */ 805 if (ret < 0) 806 mlog_errno(ret); 807 808 // return from here, then 809 // sleep until all received or error 810 return ret; 811 812 } 813 814 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, 815 void **ret_data) 816 { 817 struct dlm_ctxt *dlm = data; 818 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; 819 char *buf = NULL; 820 struct dlm_work_item *item = NULL; 821 822 if (!dlm_grab(dlm)) 823 return -EINVAL; 824 825 if (lr->dead_node != dlm->reco.dead_node) { 826 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " 827 "dead_node is %u\n", dlm->name, lr->node_idx, 828 lr->dead_node, dlm->reco.dead_node); 829 dlm_print_reco_node_status(dlm); 830 /* this is a hack */ 831 dlm_put(dlm); 832 return -ENOMEM; 833 } 834 BUG_ON(lr->dead_node != dlm->reco.dead_node); 835 836 item = kzalloc(sizeof(*item), GFP_NOFS); 837 if (!item) { 838 dlm_put(dlm); 839 return -ENOMEM; 840 } 841 842 /* this will get freed by dlm_request_all_locks_worker */ 843 buf = (char *) __get_free_page(GFP_NOFS); 844 if (!buf) { 845 kfree(item); 846 dlm_put(dlm); 847 return -ENOMEM; 848 } 849 850 /* queue up work for dlm_request_all_locks_worker */ 851 dlm_grab(dlm); /* get an extra ref for the work item */ 852 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); 853 item->u.ral.reco_master = lr->node_idx; 854 item->u.ral.dead_node = lr->dead_node; 855 spin_lock(&dlm->work_lock); 856 list_add_tail(&item->list, &dlm->work_list); 857 spin_unlock(&dlm->work_lock); 858 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 859 860 dlm_put(dlm); 861 return 0; 862 } 863 864 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) 865 { 866 struct dlm_migratable_lockres *mres; 867 struct dlm_lock_resource *res; 868 struct dlm_ctxt *dlm; 869 LIST_HEAD(resources); 870 int ret; 871 u8 dead_node, reco_master; 872 int skip_all_done = 0; 873 874 dlm = item->dlm; 875 dead_node = item->u.ral.dead_node; 876 reco_master = item->u.ral.reco_master; 877 mres = (struct dlm_migratable_lockres *)data; 878 879 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", 880 dlm->name, dead_node, reco_master); 881 882 if (dead_node != dlm->reco.dead_node || 883 reco_master != dlm->reco.new_master) { 884 /* worker could have been created before the recovery master 885 * died. if so, do not continue, but do not error. */ 886 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 887 mlog(ML_NOTICE, "%s: will not send recovery state, " 888 "recovery master %u died, thread=(dead=%u,mas=%u)" 889 " current=(dead=%u,mas=%u)\n", dlm->name, 890 reco_master, dead_node, reco_master, 891 dlm->reco.dead_node, dlm->reco.new_master); 892 } else { 893 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " 894 "master=%u), request(dead=%u, master=%u)\n", 895 dlm->name, dlm->reco.dead_node, 896 dlm->reco.new_master, dead_node, reco_master); 897 } 898 goto leave; 899 } 900 901 /* lock resources should have already been moved to the 902 * dlm->reco.resources list. now move items from that list 903 * to a temp list if the dead owner matches. note that the 904 * whole cluster recovers only one node at a time, so we 905 * can safely move UNKNOWN lock resources for each recovery 906 * session. */ 907 dlm_move_reco_locks_to_list(dlm, &resources, dead_node); 908 909 /* now we can begin blasting lockreses without the dlm lock */ 910 911 /* any errors returned will be due to the new_master dying, 912 * the dlm_reco_thread should detect this */ 913 list_for_each_entry(res, &resources, recovering) { 914 ret = dlm_send_one_lockres(dlm, res, mres, reco_master, 915 DLM_MRES_RECOVERY); 916 if (ret < 0) { 917 mlog(ML_ERROR, "%s: node %u went down while sending " 918 "recovery state for dead node %u, ret=%d\n", dlm->name, 919 reco_master, dead_node, ret); 920 skip_all_done = 1; 921 break; 922 } 923 } 924 925 /* move the resources back to the list */ 926 spin_lock(&dlm->spinlock); 927 list_splice_init(&resources, &dlm->reco.resources); 928 spin_unlock(&dlm->spinlock); 929 930 if (!skip_all_done) { 931 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); 932 if (ret < 0) { 933 mlog(ML_ERROR, "%s: node %u went down while sending " 934 "recovery all-done for dead node %u, ret=%d\n", 935 dlm->name, reco_master, dead_node, ret); 936 } 937 } 938 leave: 939 free_page((unsigned long)data); 940 } 941 942 943 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) 944 { 945 int ret, tmpret; 946 struct dlm_reco_data_done done_msg; 947 948 memset(&done_msg, 0, sizeof(done_msg)); 949 done_msg.node_idx = dlm->node_num; 950 done_msg.dead_node = dead_node; 951 mlog(0, "sending DATA DONE message to %u, " 952 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, 953 done_msg.dead_node); 954 955 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 956 sizeof(done_msg), send_to, &tmpret); 957 if (ret < 0) { 958 if (!dlm_is_host_down(ret)) { 959 mlog_errno(ret); 960 mlog(ML_ERROR, "%s: unknown error sending data-done " 961 "to %u\n", dlm->name, send_to); 962 BUG(); 963 } 964 } else 965 ret = tmpret; 966 return ret; 967 } 968 969 970 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data, 971 void **ret_data) 972 { 973 struct dlm_ctxt *dlm = data; 974 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; 975 struct dlm_reco_node_data *ndata = NULL; 976 int ret = -EINVAL; 977 978 if (!dlm_grab(dlm)) 979 return -EINVAL; 980 981 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " 982 "node_idx=%u, this node=%u\n", done->dead_node, 983 dlm->reco.dead_node, done->node_idx, dlm->node_num); 984 985 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), 986 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " 987 "node_idx=%u, this node=%u\n", done->dead_node, 988 dlm->reco.dead_node, done->node_idx, dlm->node_num); 989 990 spin_lock(&dlm_reco_state_lock); 991 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 992 if (ndata->node_num != done->node_idx) 993 continue; 994 995 switch (ndata->state) { 996 /* should have moved beyond INIT but not to FINALIZE yet */ 997 case DLM_RECO_NODE_DATA_INIT: 998 case DLM_RECO_NODE_DATA_DEAD: 999 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 1000 mlog(ML_ERROR, "bad ndata state for node %u:" 1001 " state=%d\n", ndata->node_num, 1002 ndata->state); 1003 BUG(); 1004 break; 1005 /* these states are possible at this point, anywhere along 1006 * the line of recovery */ 1007 case DLM_RECO_NODE_DATA_DONE: 1008 case DLM_RECO_NODE_DATA_RECEIVING: 1009 case DLM_RECO_NODE_DATA_REQUESTED: 1010 case DLM_RECO_NODE_DATA_REQUESTING: 1011 mlog(0, "node %u is DONE sending " 1012 "recovery data!\n", 1013 ndata->node_num); 1014 1015 ndata->state = DLM_RECO_NODE_DATA_DONE; 1016 ret = 0; 1017 break; 1018 } 1019 } 1020 spin_unlock(&dlm_reco_state_lock); 1021 1022 /* wake the recovery thread, some node is done */ 1023 if (!ret) 1024 dlm_kick_recovery_thread(dlm); 1025 1026 if (ret < 0) 1027 mlog(ML_ERROR, "failed to find recovery node data for node " 1028 "%u\n", done->node_idx); 1029 dlm_put(dlm); 1030 1031 mlog(0, "leaving reco data done handler, ret=%d\n", ret); 1032 return ret; 1033 } 1034 1035 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 1036 struct list_head *list, 1037 u8 dead_node) 1038 { 1039 struct dlm_lock_resource *res, *next; 1040 struct dlm_lock *lock; 1041 1042 spin_lock(&dlm->spinlock); 1043 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 1044 /* always prune any $RECOVERY entries for dead nodes, 1045 * otherwise hangs can occur during later recovery */ 1046 if (dlm_is_recovery_lock(res->lockname.name, 1047 res->lockname.len)) { 1048 spin_lock(&res->spinlock); 1049 list_for_each_entry(lock, &res->granted, list) { 1050 if (lock->ml.node == dead_node) { 1051 mlog(0, "AHA! there was " 1052 "a $RECOVERY lock for dead " 1053 "node %u (%s)!\n", 1054 dead_node, dlm->name); 1055 list_del_init(&lock->list); 1056 dlm_lock_put(lock); 1057 break; 1058 } 1059 } 1060 spin_unlock(&res->spinlock); 1061 continue; 1062 } 1063 1064 if (res->owner == dead_node) { 1065 mlog(0, "found lockres owned by dead node while " 1066 "doing recovery for node %u. sending it.\n", 1067 dead_node); 1068 list_move_tail(&res->recovering, list); 1069 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 1070 mlog(0, "found UNKNOWN owner while doing recovery " 1071 "for node %u. sending it.\n", dead_node); 1072 list_move_tail(&res->recovering, list); 1073 } 1074 } 1075 spin_unlock(&dlm->spinlock); 1076 } 1077 1078 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) 1079 { 1080 int total_locks = 0; 1081 struct list_head *iter, *queue = &res->granted; 1082 int i; 1083 1084 for (i=0; i<3; i++) { 1085 list_for_each(iter, queue) 1086 total_locks++; 1087 queue++; 1088 } 1089 return total_locks; 1090 } 1091 1092 1093 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 1094 struct dlm_migratable_lockres *mres, 1095 u8 send_to, 1096 struct dlm_lock_resource *res, 1097 int total_locks) 1098 { 1099 u64 mig_cookie = be64_to_cpu(mres->mig_cookie); 1100 int mres_total_locks = be32_to_cpu(mres->total_locks); 1101 int sz, ret = 0, status = 0; 1102 u8 orig_flags = mres->flags, 1103 orig_master = mres->master; 1104 1105 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); 1106 if (!mres->num_locks) 1107 return 0; 1108 1109 sz = sizeof(struct dlm_migratable_lockres) + 1110 (mres->num_locks * sizeof(struct dlm_migratable_lock)); 1111 1112 /* add an all-done flag if we reached the last lock */ 1113 orig_flags = mres->flags; 1114 BUG_ON(total_locks > mres_total_locks); 1115 if (total_locks == mres_total_locks) 1116 mres->flags |= DLM_MRES_ALL_DONE; 1117 1118 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", 1119 dlm->name, res->lockname.len, res->lockname.name, 1120 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery", 1121 send_to); 1122 1123 /* send it */ 1124 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, 1125 sz, send_to, &status); 1126 if (ret < 0) { 1127 /* XXX: negative status is not handled. 1128 * this will end up killing this node. */ 1129 mlog_errno(ret); 1130 } else { 1131 /* might get an -ENOMEM back here */ 1132 ret = status; 1133 if (ret < 0) { 1134 mlog_errno(ret); 1135 1136 if (ret == -EFAULT) { 1137 mlog(ML_ERROR, "node %u told me to kill " 1138 "myself!\n", send_to); 1139 BUG(); 1140 } 1141 } 1142 } 1143 1144 /* zero and reinit the message buffer */ 1145 dlm_init_migratable_lockres(mres, res->lockname.name, 1146 res->lockname.len, mres_total_locks, 1147 mig_cookie, orig_flags, orig_master); 1148 return ret; 1149 } 1150 1151 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 1152 const char *lockname, int namelen, 1153 int total_locks, u64 cookie, 1154 u8 flags, u8 master) 1155 { 1156 /* mres here is one full page */ 1157 clear_page(mres); 1158 mres->lockname_len = namelen; 1159 memcpy(mres->lockname, lockname, namelen); 1160 mres->num_locks = 0; 1161 mres->total_locks = cpu_to_be32(total_locks); 1162 mres->mig_cookie = cpu_to_be64(cookie); 1163 mres->flags = flags; 1164 mres->master = master; 1165 } 1166 1167 static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, 1168 struct dlm_migratable_lockres *mres, 1169 int queue) 1170 { 1171 if (!lock->lksb) 1172 return; 1173 1174 /* Ignore lvb in all locks in the blocked list */ 1175 if (queue == DLM_BLOCKED_LIST) 1176 return; 1177 1178 /* Only consider lvbs in locks with granted EX or PR lock levels */ 1179 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) 1180 return; 1181 1182 if (dlm_lvb_is_empty(mres->lvb)) { 1183 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); 1184 return; 1185 } 1186 1187 /* Ensure the lvb copied for migration matches in other valid locks */ 1188 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) 1189 return; 1190 1191 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " 1192 "node=%u\n", 1193 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), 1194 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), 1195 lock->lockres->lockname.len, lock->lockres->lockname.name, 1196 lock->ml.node); 1197 dlm_print_one_lock_resource(lock->lockres); 1198 BUG(); 1199 } 1200 1201 /* returns 1 if this lock fills the network structure, 1202 * 0 otherwise */ 1203 static int dlm_add_lock_to_array(struct dlm_lock *lock, 1204 struct dlm_migratable_lockres *mres, int queue) 1205 { 1206 struct dlm_migratable_lock *ml; 1207 int lock_num = mres->num_locks; 1208 1209 ml = &(mres->ml[lock_num]); 1210 ml->cookie = lock->ml.cookie; 1211 ml->type = lock->ml.type; 1212 ml->convert_type = lock->ml.convert_type; 1213 ml->highest_blocked = lock->ml.highest_blocked; 1214 ml->list = queue; 1215 if (lock->lksb) { 1216 ml->flags = lock->lksb->flags; 1217 dlm_prepare_lvb_for_migration(lock, mres, queue); 1218 } 1219 ml->node = lock->ml.node; 1220 mres->num_locks++; 1221 /* we reached the max, send this network message */ 1222 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) 1223 return 1; 1224 return 0; 1225 } 1226 1227 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm, 1228 struct dlm_migratable_lockres *mres) 1229 { 1230 struct dlm_lock dummy; 1231 memset(&dummy, 0, sizeof(dummy)); 1232 dummy.ml.cookie = 0; 1233 dummy.ml.type = LKM_IVMODE; 1234 dummy.ml.convert_type = LKM_IVMODE; 1235 dummy.ml.highest_blocked = LKM_IVMODE; 1236 dummy.lksb = NULL; 1237 dummy.ml.node = dlm->node_num; 1238 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST); 1239 } 1240 1241 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm, 1242 struct dlm_migratable_lock *ml, 1243 u8 *nodenum) 1244 { 1245 if (unlikely(ml->cookie == 0 && 1246 ml->type == LKM_IVMODE && 1247 ml->convert_type == LKM_IVMODE && 1248 ml->highest_blocked == LKM_IVMODE && 1249 ml->list == DLM_BLOCKED_LIST)) { 1250 *nodenum = ml->node; 1251 return 1; 1252 } 1253 return 0; 1254 } 1255 1256 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1257 struct dlm_migratable_lockres *mres, 1258 u8 send_to, u8 flags) 1259 { 1260 struct list_head *queue; 1261 int total_locks, i; 1262 u64 mig_cookie = 0; 1263 struct dlm_lock *lock; 1264 int ret = 0; 1265 1266 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1267 1268 mlog(0, "sending to %u\n", send_to); 1269 1270 total_locks = dlm_num_locks_in_lockres(res); 1271 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { 1272 /* rare, but possible */ 1273 mlog(0, "argh. lockres has %d locks. this will " 1274 "require more than one network packet to " 1275 "migrate\n", total_locks); 1276 mig_cookie = dlm_get_next_mig_cookie(); 1277 } 1278 1279 dlm_init_migratable_lockres(mres, res->lockname.name, 1280 res->lockname.len, total_locks, 1281 mig_cookie, flags, res->owner); 1282 1283 total_locks = 0; 1284 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { 1285 queue = dlm_list_idx_to_ptr(res, i); 1286 list_for_each_entry(lock, queue, list) { 1287 /* add another lock. */ 1288 total_locks++; 1289 if (!dlm_add_lock_to_array(lock, mres, i)) 1290 continue; 1291 1292 /* this filled the lock message, 1293 * we must send it immediately. */ 1294 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, 1295 res, total_locks); 1296 if (ret < 0) 1297 goto error; 1298 } 1299 } 1300 if (total_locks == 0) { 1301 /* send a dummy lock to indicate a mastery reference only */ 1302 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n", 1303 dlm->name, res->lockname.len, res->lockname.name, 1304 send_to, flags & DLM_MRES_RECOVERY ? "recovery" : 1305 "migration"); 1306 dlm_add_dummy_lock(dlm, mres); 1307 } 1308 /* flush any remaining locks */ 1309 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); 1310 if (ret < 0) 1311 goto error; 1312 return ret; 1313 1314 error: 1315 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", 1316 dlm->name, ret); 1317 if (!dlm_is_host_down(ret)) 1318 BUG(); 1319 mlog(0, "%s: node %u went down while sending %s " 1320 "lockres %.*s\n", dlm->name, send_to, 1321 flags & DLM_MRES_RECOVERY ? "recovery" : "migration", 1322 res->lockname.len, res->lockname.name); 1323 return ret; 1324 } 1325 1326 1327 1328 /* 1329 * this message will contain no more than one page worth of 1330 * recovery data, and it will work on only one lockres. 1331 * there may be many locks in this page, and we may need to wait 1332 * for additional packets to complete all the locks (rare, but 1333 * possible). 1334 */ 1335 /* 1336 * NOTE: the allocation error cases here are scary 1337 * we really cannot afford to fail an alloc in recovery 1338 * do we spin? returning an error only delays the problem really 1339 */ 1340 1341 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 1342 void **ret_data) 1343 { 1344 struct dlm_ctxt *dlm = data; 1345 struct dlm_migratable_lockres *mres = 1346 (struct dlm_migratable_lockres *)msg->buf; 1347 int ret = 0; 1348 u8 real_master; 1349 u8 extra_refs = 0; 1350 char *buf = NULL; 1351 struct dlm_work_item *item = NULL; 1352 struct dlm_lock_resource *res = NULL; 1353 1354 if (!dlm_grab(dlm)) 1355 return -EINVAL; 1356 1357 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1358 1359 real_master = mres->master; 1360 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1361 /* cannot migrate a lockres with no master */ 1362 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1363 } 1364 1365 mlog(0, "%s message received from node %u\n", 1366 (mres->flags & DLM_MRES_RECOVERY) ? 1367 "recovery" : "migration", mres->master); 1368 if (mres->flags & DLM_MRES_ALL_DONE) 1369 mlog(0, "all done flag. all lockres data received!\n"); 1370 1371 ret = -ENOMEM; 1372 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); 1373 item = kzalloc(sizeof(*item), GFP_NOFS); 1374 if (!buf || !item) 1375 goto leave; 1376 1377 /* lookup the lock to see if we have a secondary queue for this 1378 * already... just add the locks in and this will have its owner 1379 * and RECOVERY flag changed when it completes. */ 1380 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); 1381 if (res) { 1382 /* this will get a ref on res */ 1383 /* mark it as recovering/migrating and hash it */ 1384 spin_lock(&res->spinlock); 1385 if (mres->flags & DLM_MRES_RECOVERY) { 1386 res->state |= DLM_LOCK_RES_RECOVERING; 1387 } else { 1388 if (res->state & DLM_LOCK_RES_MIGRATING) { 1389 /* this is at least the second 1390 * lockres message */ 1391 mlog(0, "lock %.*s is already migrating\n", 1392 mres->lockname_len, 1393 mres->lockname); 1394 } else if (res->state & DLM_LOCK_RES_RECOVERING) { 1395 /* caller should BUG */ 1396 mlog(ML_ERROR, "node is attempting to migrate " 1397 "lock %.*s, but marked as recovering!\n", 1398 mres->lockname_len, mres->lockname); 1399 ret = -EFAULT; 1400 spin_unlock(&res->spinlock); 1401 goto leave; 1402 } 1403 res->state |= DLM_LOCK_RES_MIGRATING; 1404 } 1405 spin_unlock(&res->spinlock); 1406 } else { 1407 /* need to allocate, just like if it was 1408 * mastered here normally */ 1409 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); 1410 if (!res) 1411 goto leave; 1412 1413 /* to match the ref that we would have gotten if 1414 * dlm_lookup_lockres had succeeded */ 1415 dlm_lockres_get(res); 1416 1417 /* mark it as recovering/migrating and hash it */ 1418 if (mres->flags & DLM_MRES_RECOVERY) 1419 res->state |= DLM_LOCK_RES_RECOVERING; 1420 else 1421 res->state |= DLM_LOCK_RES_MIGRATING; 1422 1423 spin_lock(&dlm->spinlock); 1424 __dlm_insert_lockres(dlm, res); 1425 spin_unlock(&dlm->spinlock); 1426 1427 /* Add an extra ref for this lock-less lockres lest the 1428 * dlm_thread purges it before we get the chance to add 1429 * locks to it */ 1430 dlm_lockres_get(res); 1431 1432 /* There are three refs that need to be put. 1433 * 1. Taken above. 1434 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres(). 1435 * 3. dlm_lookup_lockres() 1436 * The first one is handled at the end of this function. The 1437 * other two are handled in the worker thread after locks have 1438 * been attached. Yes, we don't wait for purge time to match 1439 * kref_init. The lockres will still have atleast one ref 1440 * added because it is in the hash __dlm_insert_lockres() */ 1441 extra_refs++; 1442 1443 /* now that the new lockres is inserted, 1444 * make it usable by other processes */ 1445 spin_lock(&res->spinlock); 1446 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1447 spin_unlock(&res->spinlock); 1448 wake_up(&res->wq); 1449 } 1450 1451 /* at this point we have allocated everything we need, 1452 * and we have a hashed lockres with an extra ref and 1453 * the proper res->state flags. */ 1454 ret = 0; 1455 spin_lock(&res->spinlock); 1456 /* drop this either when master requery finds a different master 1457 * or when a lock is added by the recovery worker */ 1458 dlm_lockres_grab_inflight_ref(dlm, res); 1459 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1460 /* migration cannot have an unknown master */ 1461 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1462 mlog(0, "recovery has passed me a lockres with an " 1463 "unknown owner.. will need to requery: " 1464 "%.*s\n", mres->lockname_len, mres->lockname); 1465 } else { 1466 /* take a reference now to pin the lockres, drop it 1467 * when locks are added in the worker */ 1468 dlm_change_lockres_owner(dlm, res, dlm->node_num); 1469 } 1470 spin_unlock(&res->spinlock); 1471 1472 /* queue up work for dlm_mig_lockres_worker */ 1473 dlm_grab(dlm); /* get an extra ref for the work item */ 1474 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ 1475 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); 1476 item->u.ml.lockres = res; /* already have a ref */ 1477 item->u.ml.real_master = real_master; 1478 item->u.ml.extra_ref = extra_refs; 1479 spin_lock(&dlm->work_lock); 1480 list_add_tail(&item->list, &dlm->work_list); 1481 spin_unlock(&dlm->work_lock); 1482 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 1483 1484 leave: 1485 /* One extra ref taken needs to be put here */ 1486 if (extra_refs) 1487 dlm_lockres_put(res); 1488 1489 dlm_put(dlm); 1490 if (ret < 0) { 1491 if (buf) 1492 kfree(buf); 1493 if (item) 1494 kfree(item); 1495 } 1496 1497 mlog_exit(ret); 1498 return ret; 1499 } 1500 1501 1502 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) 1503 { 1504 struct dlm_ctxt *dlm; 1505 struct dlm_migratable_lockres *mres; 1506 int ret = 0; 1507 struct dlm_lock_resource *res; 1508 u8 real_master; 1509 u8 extra_ref; 1510 1511 dlm = item->dlm; 1512 mres = (struct dlm_migratable_lockres *)data; 1513 1514 res = item->u.ml.lockres; 1515 real_master = item->u.ml.real_master; 1516 extra_ref = item->u.ml.extra_ref; 1517 1518 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1519 /* this case is super-rare. only occurs if 1520 * node death happens during migration. */ 1521 again: 1522 ret = dlm_lockres_master_requery(dlm, res, &real_master); 1523 if (ret < 0) { 1524 mlog(0, "dlm_lockres_master_requery ret=%d\n", 1525 ret); 1526 goto again; 1527 } 1528 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1529 mlog(0, "lockres %.*s not claimed. " 1530 "this node will take it.\n", 1531 res->lockname.len, res->lockname.name); 1532 } else { 1533 spin_lock(&res->spinlock); 1534 dlm_lockres_drop_inflight_ref(dlm, res); 1535 spin_unlock(&res->spinlock); 1536 mlog(0, "master needs to respond to sender " 1537 "that node %u still owns %.*s\n", 1538 real_master, res->lockname.len, 1539 res->lockname.name); 1540 /* cannot touch this lockres */ 1541 goto leave; 1542 } 1543 } 1544 1545 ret = dlm_process_recovery_data(dlm, res, mres); 1546 if (ret < 0) 1547 mlog(0, "dlm_process_recovery_data returned %d\n", ret); 1548 else 1549 mlog(0, "dlm_process_recovery_data succeeded\n"); 1550 1551 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == 1552 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { 1553 ret = dlm_finish_migration(dlm, res, mres->master); 1554 if (ret < 0) 1555 mlog_errno(ret); 1556 } 1557 1558 leave: 1559 /* See comment in dlm_mig_lockres_handler() */ 1560 if (res) { 1561 if (extra_ref) 1562 dlm_lockres_put(res); 1563 dlm_lockres_put(res); 1564 } 1565 kfree(data); 1566 mlog_exit(ret); 1567 } 1568 1569 1570 1571 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 1572 struct dlm_lock_resource *res, 1573 u8 *real_master) 1574 { 1575 struct dlm_node_iter iter; 1576 int nodenum; 1577 int ret = 0; 1578 1579 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; 1580 1581 /* we only reach here if one of the two nodes in a 1582 * migration died while the migration was in progress. 1583 * at this point we need to requery the master. we 1584 * know that the new_master got as far as creating 1585 * an mle on at least one node, but we do not know 1586 * if any nodes had actually cleared the mle and set 1587 * the master to the new_master. the old master 1588 * is supposed to set the owner to UNKNOWN in the 1589 * event of a new_master death, so the only possible 1590 * responses that we can get from nodes here are 1591 * that the master is new_master, or that the master 1592 * is UNKNOWN. 1593 * if all nodes come back with UNKNOWN then we know 1594 * the lock needs remastering here. 1595 * if any node comes back with a valid master, check 1596 * to see if that master is the one that we are 1597 * recovering. if so, then the new_master died and 1598 * we need to remaster this lock. if not, then the 1599 * new_master survived and that node will respond to 1600 * other nodes about the owner. 1601 * if there is an owner, this node needs to dump this 1602 * lockres and alert the sender that this lockres 1603 * was rejected. */ 1604 spin_lock(&dlm->spinlock); 1605 dlm_node_iter_init(dlm->domain_map, &iter); 1606 spin_unlock(&dlm->spinlock); 1607 1608 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 1609 /* do not send to self */ 1610 if (nodenum == dlm->node_num) 1611 continue; 1612 ret = dlm_do_master_requery(dlm, res, nodenum, real_master); 1613 if (ret < 0) { 1614 mlog_errno(ret); 1615 if (!dlm_is_host_down(ret)) 1616 BUG(); 1617 /* host is down, so answer for that node would be 1618 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 1619 } 1620 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1621 mlog(0, "lock master is %u\n", *real_master); 1622 break; 1623 } 1624 } 1625 return ret; 1626 } 1627 1628 1629 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1630 u8 nodenum, u8 *real_master) 1631 { 1632 int ret = -EINVAL; 1633 struct dlm_master_requery req; 1634 int status = DLM_LOCK_RES_OWNER_UNKNOWN; 1635 1636 memset(&req, 0, sizeof(req)); 1637 req.node_idx = dlm->node_num; 1638 req.namelen = res->lockname.len; 1639 memcpy(req.name, res->lockname.name, res->lockname.len); 1640 1641 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, 1642 &req, sizeof(req), nodenum, &status); 1643 /* XXX: negative status not handled properly here. */ 1644 if (ret < 0) 1645 mlog_errno(ret); 1646 else { 1647 BUG_ON(status < 0); 1648 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); 1649 *real_master = (u8) (status & 0xff); 1650 mlog(0, "node %u responded to master requery with %u\n", 1651 nodenum, *real_master); 1652 ret = 0; 1653 } 1654 return ret; 1655 } 1656 1657 1658 /* this function cannot error, so unless the sending 1659 * or receiving of the message failed, the owner can 1660 * be trusted */ 1661 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, 1662 void **ret_data) 1663 { 1664 struct dlm_ctxt *dlm = data; 1665 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; 1666 struct dlm_lock_resource *res = NULL; 1667 unsigned int hash; 1668 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1669 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1670 1671 if (!dlm_grab(dlm)) { 1672 /* since the domain has gone away on this 1673 * node, the proper response is UNKNOWN */ 1674 return master; 1675 } 1676 1677 hash = dlm_lockid_hash(req->name, req->namelen); 1678 1679 spin_lock(&dlm->spinlock); 1680 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); 1681 if (res) { 1682 spin_lock(&res->spinlock); 1683 master = res->owner; 1684 if (master == dlm->node_num) { 1685 int ret = dlm_dispatch_assert_master(dlm, res, 1686 0, 0, flags); 1687 if (ret < 0) { 1688 mlog_errno(-ENOMEM); 1689 /* retry!? */ 1690 BUG(); 1691 } 1692 } else /* put.. incase we are not the master */ 1693 dlm_lockres_put(res); 1694 spin_unlock(&res->spinlock); 1695 } 1696 spin_unlock(&dlm->spinlock); 1697 1698 dlm_put(dlm); 1699 return master; 1700 } 1701 1702 static inline struct list_head * 1703 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) 1704 { 1705 struct list_head *ret; 1706 BUG_ON(list_num < 0); 1707 BUG_ON(list_num > 2); 1708 ret = &(res->granted); 1709 ret += list_num; 1710 return ret; 1711 } 1712 /* TODO: do ast flush business 1713 * TODO: do MIGRATING and RECOVERING spinning 1714 */ 1715 1716 /* 1717 * NOTE about in-flight requests during migration: 1718 * 1719 * Before attempting the migrate, the master has marked the lockres as 1720 * MIGRATING and then flushed all of its pending ASTS. So any in-flight 1721 * requests either got queued before the MIGRATING flag got set, in which 1722 * case the lock data will reflect the change and a return message is on 1723 * the way, or the request failed to get in before MIGRATING got set. In 1724 * this case, the caller will be told to spin and wait for the MIGRATING 1725 * flag to be dropped, then recheck the master. 1726 * This holds true for the convert, cancel and unlock cases, and since lvb 1727 * updates are tied to these same messages, it applies to lvb updates as 1728 * well. For the lock case, there is no way a lock can be on the master 1729 * queue and not be on the secondary queue since the lock is always added 1730 * locally first. This means that the new target node will never be sent 1731 * a lock that he doesn't already have on the list. 1732 * In total, this means that the local lock is correct and should not be 1733 * updated to match the one sent by the master. Any messages sent back 1734 * from the master before the MIGRATING flag will bring the lock properly 1735 * up-to-date, and the change will be ordered properly for the waiter. 1736 * We will *not* attempt to modify the lock underneath the waiter. 1737 */ 1738 1739 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 1740 struct dlm_lock_resource *res, 1741 struct dlm_migratable_lockres *mres) 1742 { 1743 struct dlm_migratable_lock *ml; 1744 struct list_head *queue; 1745 struct list_head *tmpq = NULL; 1746 struct dlm_lock *newlock = NULL; 1747 struct dlm_lockstatus *lksb = NULL; 1748 int ret = 0; 1749 int i, j, bad; 1750 struct dlm_lock *lock = NULL; 1751 u8 from = O2NM_MAX_NODES; 1752 unsigned int added = 0; 1753 __be64 c; 1754 1755 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1756 for (i=0; i<mres->num_locks; i++) { 1757 ml = &(mres->ml[i]); 1758 1759 if (dlm_is_dummy_lock(dlm, ml, &from)) { 1760 /* placeholder, just need to set the refmap bit */ 1761 BUG_ON(mres->num_locks != 1); 1762 mlog(0, "%s:%.*s: dummy lock for %u\n", 1763 dlm->name, mres->lockname_len, mres->lockname, 1764 from); 1765 spin_lock(&res->spinlock); 1766 dlm_lockres_set_refmap_bit(from, res); 1767 spin_unlock(&res->spinlock); 1768 added++; 1769 break; 1770 } 1771 BUG_ON(ml->highest_blocked != LKM_IVMODE); 1772 newlock = NULL; 1773 lksb = NULL; 1774 1775 queue = dlm_list_num_to_pointer(res, ml->list); 1776 tmpq = NULL; 1777 1778 /* if the lock is for the local node it needs to 1779 * be moved to the proper location within the queue. 1780 * do not allocate a new lock structure. */ 1781 if (ml->node == dlm->node_num) { 1782 /* MIGRATION ONLY! */ 1783 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); 1784 1785 spin_lock(&res->spinlock); 1786 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { 1787 tmpq = dlm_list_idx_to_ptr(res, j); 1788 list_for_each_entry(lock, tmpq, list) { 1789 if (lock->ml.cookie != ml->cookie) 1790 lock = NULL; 1791 else 1792 break; 1793 } 1794 if (lock) 1795 break; 1796 } 1797 1798 /* lock is always created locally first, and 1799 * destroyed locally last. it must be on the list */ 1800 if (!lock) { 1801 c = ml->cookie; 1802 mlog(ML_ERROR, "Could not find local lock " 1803 "with cookie %u:%llu, node %u, " 1804 "list %u, flags 0x%x, type %d, " 1805 "conv %d, highest blocked %d\n", 1806 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1807 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1808 ml->node, ml->list, ml->flags, ml->type, 1809 ml->convert_type, ml->highest_blocked); 1810 __dlm_print_one_lock_resource(res); 1811 BUG(); 1812 } 1813 1814 if (lock->ml.node != ml->node) { 1815 c = lock->ml.cookie; 1816 mlog(ML_ERROR, "Mismatched node# in lock " 1817 "cookie %u:%llu, name %.*s, node %u\n", 1818 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1819 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1820 res->lockname.len, res->lockname.name, 1821 lock->ml.node); 1822 c = ml->cookie; 1823 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " 1824 "node %u, list %u, flags 0x%x, type %d, " 1825 "conv %d, highest blocked %d\n", 1826 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1827 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1828 ml->node, ml->list, ml->flags, ml->type, 1829 ml->convert_type, ml->highest_blocked); 1830 __dlm_print_one_lock_resource(res); 1831 BUG(); 1832 } 1833 1834 if (tmpq != queue) { 1835 c = ml->cookie; 1836 mlog(0, "Lock cookie %u:%llu was on list %u " 1837 "instead of list %u for %.*s\n", 1838 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1839 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1840 j, ml->list, res->lockname.len, 1841 res->lockname.name); 1842 __dlm_print_one_lock_resource(res); 1843 spin_unlock(&res->spinlock); 1844 continue; 1845 } 1846 1847 /* see NOTE above about why we do not update 1848 * to match the master here */ 1849 1850 /* move the lock to its proper place */ 1851 /* do not alter lock refcount. switching lists. */ 1852 list_move_tail(&lock->list, queue); 1853 spin_unlock(&res->spinlock); 1854 added++; 1855 1856 mlog(0, "just reordered a local lock!\n"); 1857 continue; 1858 } 1859 1860 /* lock is for another node. */ 1861 newlock = dlm_new_lock(ml->type, ml->node, 1862 be64_to_cpu(ml->cookie), NULL); 1863 if (!newlock) { 1864 ret = -ENOMEM; 1865 goto leave; 1866 } 1867 lksb = newlock->lksb; 1868 dlm_lock_attach_lockres(newlock, res); 1869 1870 if (ml->convert_type != LKM_IVMODE) { 1871 BUG_ON(queue != &res->converting); 1872 newlock->ml.convert_type = ml->convert_type; 1873 } 1874 lksb->flags |= (ml->flags & 1875 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); 1876 1877 if (ml->type == LKM_NLMODE) 1878 goto skip_lvb; 1879 1880 if (!dlm_lvb_is_empty(mres->lvb)) { 1881 if (lksb->flags & DLM_LKSB_PUT_LVB) { 1882 /* other node was trying to update 1883 * lvb when node died. recreate the 1884 * lksb with the updated lvb. */ 1885 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); 1886 /* the lock resource lvb update must happen 1887 * NOW, before the spinlock is dropped. 1888 * we no longer wait for the AST to update 1889 * the lvb. */ 1890 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1891 } else { 1892 /* otherwise, the node is sending its 1893 * most recent valid lvb info */ 1894 BUG_ON(ml->type != LKM_EXMODE && 1895 ml->type != LKM_PRMODE); 1896 if (!dlm_lvb_is_empty(res->lvb) && 1897 (ml->type == LKM_EXMODE || 1898 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { 1899 int i; 1900 mlog(ML_ERROR, "%s:%.*s: received bad " 1901 "lvb! type=%d\n", dlm->name, 1902 res->lockname.len, 1903 res->lockname.name, ml->type); 1904 printk("lockres lvb=["); 1905 for (i=0; i<DLM_LVB_LEN; i++) 1906 printk("%02x", res->lvb[i]); 1907 printk("]\nmigrated lvb=["); 1908 for (i=0; i<DLM_LVB_LEN; i++) 1909 printk("%02x", mres->lvb[i]); 1910 printk("]\n"); 1911 dlm_print_one_lock_resource(res); 1912 BUG(); 1913 } 1914 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1915 } 1916 } 1917 skip_lvb: 1918 1919 /* NOTE: 1920 * wrt lock queue ordering and recovery: 1921 * 1. order of locks on granted queue is 1922 * meaningless. 1923 * 2. order of locks on converting queue is 1924 * LOST with the node death. sorry charlie. 1925 * 3. order of locks on the blocked queue is 1926 * also LOST. 1927 * order of locks does not affect integrity, it 1928 * just means that a lock request may get pushed 1929 * back in line as a result of the node death. 1930 * also note that for a given node the lock order 1931 * for its secondary queue locks is preserved 1932 * relative to each other, but clearly *not* 1933 * preserved relative to locks from other nodes. 1934 */ 1935 bad = 0; 1936 spin_lock(&res->spinlock); 1937 list_for_each_entry(lock, queue, list) { 1938 if (lock->ml.cookie == ml->cookie) { 1939 c = lock->ml.cookie; 1940 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1941 "exists on this lockres!\n", dlm->name, 1942 res->lockname.len, res->lockname.name, 1943 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1944 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 1945 1946 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 1947 "node=%u, cookie=%u:%llu, queue=%d\n", 1948 ml->type, ml->convert_type, ml->node, 1949 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), 1950 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), 1951 ml->list); 1952 1953 __dlm_print_one_lock_resource(res); 1954 bad = 1; 1955 break; 1956 } 1957 } 1958 if (!bad) { 1959 dlm_lock_get(newlock); 1960 list_add_tail(&newlock->list, queue); 1961 mlog(0, "%s:%.*s: added lock for node %u, " 1962 "setting refmap bit\n", dlm->name, 1963 res->lockname.len, res->lockname.name, ml->node); 1964 dlm_lockres_set_refmap_bit(ml->node, res); 1965 added++; 1966 } 1967 spin_unlock(&res->spinlock); 1968 } 1969 mlog(0, "done running all the locks\n"); 1970 1971 leave: 1972 /* balance the ref taken when the work was queued */ 1973 spin_lock(&res->spinlock); 1974 dlm_lockres_drop_inflight_ref(dlm, res); 1975 spin_unlock(&res->spinlock); 1976 1977 if (ret < 0) { 1978 mlog_errno(ret); 1979 if (newlock) 1980 dlm_lock_put(newlock); 1981 } 1982 1983 mlog_exit(ret); 1984 return ret; 1985 } 1986 1987 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, 1988 struct dlm_lock_resource *res) 1989 { 1990 int i; 1991 struct list_head *queue; 1992 struct dlm_lock *lock, *next; 1993 1994 res->state |= DLM_LOCK_RES_RECOVERING; 1995 if (!list_empty(&res->recovering)) { 1996 mlog(0, 1997 "Recovering res %s:%.*s, is already on recovery list!\n", 1998 dlm->name, res->lockname.len, res->lockname.name); 1999 list_del_init(&res->recovering); 2000 dlm_lockres_put(res); 2001 } 2002 /* We need to hold a reference while on the recovery list */ 2003 dlm_lockres_get(res); 2004 list_add_tail(&res->recovering, &dlm->reco.resources); 2005 2006 /* find any pending locks and put them back on proper list */ 2007 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { 2008 queue = dlm_list_idx_to_ptr(res, i); 2009 list_for_each_entry_safe(lock, next, queue, list) { 2010 dlm_lock_get(lock); 2011 if (lock->convert_pending) { 2012 /* move converting lock back to granted */ 2013 BUG_ON(i != DLM_CONVERTING_LIST); 2014 mlog(0, "node died with convert pending " 2015 "on %.*s. move back to granted list.\n", 2016 res->lockname.len, res->lockname.name); 2017 dlm_revert_pending_convert(res, lock); 2018 lock->convert_pending = 0; 2019 } else if (lock->lock_pending) { 2020 /* remove pending lock requests completely */ 2021 BUG_ON(i != DLM_BLOCKED_LIST); 2022 mlog(0, "node died with lock pending " 2023 "on %.*s. remove from blocked list and skip.\n", 2024 res->lockname.len, res->lockname.name); 2025 /* lock will be floating until ref in 2026 * dlmlock_remote is freed after the network 2027 * call returns. ok for it to not be on any 2028 * list since no ast can be called 2029 * (the master is dead). */ 2030 dlm_revert_pending_lock(res, lock); 2031 lock->lock_pending = 0; 2032 } else if (lock->unlock_pending) { 2033 /* if an unlock was in progress, treat as 2034 * if this had completed successfully 2035 * before sending this lock state to the 2036 * new master. note that the dlm_unlock 2037 * call is still responsible for calling 2038 * the unlockast. that will happen after 2039 * the network call times out. for now, 2040 * just move lists to prepare the new 2041 * recovery master. */ 2042 BUG_ON(i != DLM_GRANTED_LIST); 2043 mlog(0, "node died with unlock pending " 2044 "on %.*s. remove from blocked list and skip.\n", 2045 res->lockname.len, res->lockname.name); 2046 dlm_commit_pending_unlock(res, lock); 2047 lock->unlock_pending = 0; 2048 } else if (lock->cancel_pending) { 2049 /* if a cancel was in progress, treat as 2050 * if this had completed successfully 2051 * before sending this lock state to the 2052 * new master */ 2053 BUG_ON(i != DLM_CONVERTING_LIST); 2054 mlog(0, "node died with cancel pending " 2055 "on %.*s. move back to granted list.\n", 2056 res->lockname.len, res->lockname.name); 2057 dlm_commit_pending_cancel(res, lock); 2058 lock->cancel_pending = 0; 2059 } 2060 dlm_lock_put(lock); 2061 } 2062 } 2063 } 2064 2065 2066 2067 /* removes all recovered locks from the recovery list. 2068 * sets the res->owner to the new master. 2069 * unsets the RECOVERY flag and wakes waiters. */ 2070 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 2071 u8 dead_node, u8 new_master) 2072 { 2073 int i; 2074 struct hlist_node *hash_iter; 2075 struct hlist_head *bucket; 2076 struct dlm_lock_resource *res, *next; 2077 2078 mlog_entry_void(); 2079 2080 assert_spin_locked(&dlm->spinlock); 2081 2082 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2083 if (res->owner == dead_node) { 2084 list_del_init(&res->recovering); 2085 spin_lock(&res->spinlock); 2086 /* new_master has our reference from 2087 * the lock state sent during recovery */ 2088 dlm_change_lockres_owner(dlm, res, new_master); 2089 res->state &= ~DLM_LOCK_RES_RECOVERING; 2090 if (__dlm_lockres_has_locks(res)) 2091 __dlm_dirty_lockres(dlm, res); 2092 spin_unlock(&res->spinlock); 2093 wake_up(&res->wq); 2094 dlm_lockres_put(res); 2095 } 2096 } 2097 2098 /* this will become unnecessary eventually, but 2099 * for now we need to run the whole hash, clear 2100 * the RECOVERING state and set the owner 2101 * if necessary */ 2102 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2103 bucket = dlm_lockres_hash(dlm, i); 2104 hlist_for_each_entry(res, hash_iter, bucket, hash_node) { 2105 if (res->state & DLM_LOCK_RES_RECOVERING) { 2106 if (res->owner == dead_node) { 2107 mlog(0, "(this=%u) res %.*s owner=%u " 2108 "was not on recovering list, but " 2109 "clearing state anyway\n", 2110 dlm->node_num, res->lockname.len, 2111 res->lockname.name, new_master); 2112 } else if (res->owner == dlm->node_num) { 2113 mlog(0, "(this=%u) res %.*s owner=%u " 2114 "was not on recovering list, " 2115 "owner is THIS node, clearing\n", 2116 dlm->node_num, res->lockname.len, 2117 res->lockname.name, new_master); 2118 } else 2119 continue; 2120 2121 if (!list_empty(&res->recovering)) { 2122 mlog(0, "%s:%.*s: lockres was " 2123 "marked RECOVERING, owner=%u\n", 2124 dlm->name, res->lockname.len, 2125 res->lockname.name, res->owner); 2126 list_del_init(&res->recovering); 2127 dlm_lockres_put(res); 2128 } 2129 spin_lock(&res->spinlock); 2130 /* new_master has our reference from 2131 * the lock state sent during recovery */ 2132 dlm_change_lockres_owner(dlm, res, new_master); 2133 res->state &= ~DLM_LOCK_RES_RECOVERING; 2134 if (__dlm_lockres_has_locks(res)) 2135 __dlm_dirty_lockres(dlm, res); 2136 spin_unlock(&res->spinlock); 2137 wake_up(&res->wq); 2138 } 2139 } 2140 } 2141 } 2142 2143 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) 2144 { 2145 if (local) { 2146 if (lock->ml.type != LKM_EXMODE && 2147 lock->ml.type != LKM_PRMODE) 2148 return 1; 2149 } else if (lock->ml.type == LKM_EXMODE) 2150 return 1; 2151 return 0; 2152 } 2153 2154 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, 2155 struct dlm_lock_resource *res, u8 dead_node) 2156 { 2157 struct list_head *queue; 2158 struct dlm_lock *lock; 2159 int blank_lvb = 0, local = 0; 2160 int i; 2161 u8 search_node; 2162 2163 assert_spin_locked(&dlm->spinlock); 2164 assert_spin_locked(&res->spinlock); 2165 2166 if (res->owner == dlm->node_num) 2167 /* if this node owned the lockres, and if the dead node 2168 * had an EX when he died, blank out the lvb */ 2169 search_node = dead_node; 2170 else { 2171 /* if this is a secondary lockres, and we had no EX or PR 2172 * locks granted, we can no longer trust the lvb */ 2173 search_node = dlm->node_num; 2174 local = 1; /* check local state for valid lvb */ 2175 } 2176 2177 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { 2178 queue = dlm_list_idx_to_ptr(res, i); 2179 list_for_each_entry(lock, queue, list) { 2180 if (lock->ml.node == search_node) { 2181 if (dlm_lvb_needs_invalidation(lock, local)) { 2182 /* zero the lksb lvb and lockres lvb */ 2183 blank_lvb = 1; 2184 memset(lock->lksb->lvb, 0, DLM_LVB_LEN); 2185 } 2186 } 2187 } 2188 } 2189 2190 if (blank_lvb) { 2191 mlog(0, "clearing %.*s lvb, dead node %u had EX\n", 2192 res->lockname.len, res->lockname.name, dead_node); 2193 memset(res->lvb, 0, DLM_LVB_LEN); 2194 } 2195 } 2196 2197 static void dlm_free_dead_locks(struct dlm_ctxt *dlm, 2198 struct dlm_lock_resource *res, u8 dead_node) 2199 { 2200 struct dlm_lock *lock, *next; 2201 unsigned int freed = 0; 2202 2203 /* this node is the lockres master: 2204 * 1) remove any stale locks for the dead node 2205 * 2) if the dead node had an EX when he died, blank out the lvb 2206 */ 2207 assert_spin_locked(&dlm->spinlock); 2208 assert_spin_locked(&res->spinlock); 2209 2210 /* We do two dlm_lock_put(). One for removing from list and the other is 2211 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ 2212 2213 /* TODO: check pending_asts, pending_basts here */ 2214 list_for_each_entry_safe(lock, next, &res->granted, list) { 2215 if (lock->ml.node == dead_node) { 2216 list_del_init(&lock->list); 2217 dlm_lock_put(lock); 2218 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2219 dlm_lock_put(lock); 2220 freed++; 2221 } 2222 } 2223 list_for_each_entry_safe(lock, next, &res->converting, list) { 2224 if (lock->ml.node == dead_node) { 2225 list_del_init(&lock->list); 2226 dlm_lock_put(lock); 2227 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2228 dlm_lock_put(lock); 2229 freed++; 2230 } 2231 } 2232 list_for_each_entry_safe(lock, next, &res->blocked, list) { 2233 if (lock->ml.node == dead_node) { 2234 list_del_init(&lock->list); 2235 dlm_lock_put(lock); 2236 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2237 dlm_lock_put(lock); 2238 freed++; 2239 } 2240 } 2241 2242 if (freed) { 2243 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2244 "dropping ref from lockres\n", dlm->name, 2245 res->lockname.len, res->lockname.name, freed, dead_node); 2246 BUG_ON(!test_bit(dead_node, res->refmap)); 2247 dlm_lockres_clear_refmap_bit(dead_node, res); 2248 } else if (test_bit(dead_node, res->refmap)) { 2249 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2250 "no locks and had not purged before dying\n", dlm->name, 2251 res->lockname.len, res->lockname.name, dead_node); 2252 dlm_lockres_clear_refmap_bit(dead_node, res); 2253 } 2254 2255 /* do not kick thread yet */ 2256 __dlm_dirty_lockres(dlm, res); 2257 } 2258 2259 /* if this node is the recovery master, and there are no 2260 * locks for a given lockres owned by this node that are in 2261 * either PR or EX mode, zero out the lvb before requesting. 2262 * 2263 */ 2264 2265 2266 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2267 { 2268 struct hlist_node *iter; 2269 struct dlm_lock_resource *res; 2270 int i; 2271 struct hlist_head *bucket; 2272 struct dlm_lock *lock; 2273 2274 2275 /* purge any stale mles */ 2276 dlm_clean_master_list(dlm, dead_node); 2277 2278 /* 2279 * now clean up all lock resources. there are two rules: 2280 * 2281 * 1) if the dead node was the master, move the lockres 2282 * to the recovering list. set the RECOVERING flag. 2283 * this lockres needs to be cleaned up before it can 2284 * be used further. 2285 * 2286 * 2) if this node was the master, remove all locks from 2287 * each of the lockres queues that were owned by the 2288 * dead node. once recovery finishes, the dlm thread 2289 * can be kicked again to see if any ASTs or BASTs 2290 * need to be fired as a result. 2291 */ 2292 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2293 bucket = dlm_lockres_hash(dlm, i); 2294 hlist_for_each_entry(res, iter, bucket, hash_node) { 2295 /* always prune any $RECOVERY entries for dead nodes, 2296 * otherwise hangs can occur during later recovery */ 2297 if (dlm_is_recovery_lock(res->lockname.name, 2298 res->lockname.len)) { 2299 spin_lock(&res->spinlock); 2300 list_for_each_entry(lock, &res->granted, list) { 2301 if (lock->ml.node == dead_node) { 2302 mlog(0, "AHA! there was " 2303 "a $RECOVERY lock for dead " 2304 "node %u (%s)!\n", 2305 dead_node, dlm->name); 2306 list_del_init(&lock->list); 2307 dlm_lock_put(lock); 2308 break; 2309 } 2310 } 2311 spin_unlock(&res->spinlock); 2312 continue; 2313 } 2314 spin_lock(&res->spinlock); 2315 /* zero the lvb if necessary */ 2316 dlm_revalidate_lvb(dlm, res, dead_node); 2317 if (res->owner == dead_node) { 2318 if (res->state & DLM_LOCK_RES_DROPPING_REF) 2319 mlog(0, "%s:%.*s: owned by " 2320 "dead node %u, this node was " 2321 "dropping its ref when it died. " 2322 "continue, dropping the flag.\n", 2323 dlm->name, res->lockname.len, 2324 res->lockname.name, dead_node); 2325 2326 /* the wake_up for this will happen when the 2327 * RECOVERING flag is dropped later */ 2328 res->state &= ~DLM_LOCK_RES_DROPPING_REF; 2329 2330 dlm_move_lockres_to_recovery_list(dlm, res); 2331 } else if (res->owner == dlm->node_num) { 2332 dlm_free_dead_locks(dlm, res, dead_node); 2333 __dlm_lockres_calc_usage(dlm, res); 2334 } 2335 spin_unlock(&res->spinlock); 2336 } 2337 } 2338 2339 } 2340 2341 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) 2342 { 2343 assert_spin_locked(&dlm->spinlock); 2344 2345 if (dlm->reco.new_master == idx) { 2346 mlog(0, "%s: recovery master %d just died\n", 2347 dlm->name, idx); 2348 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2349 /* finalize1 was reached, so it is safe to clear 2350 * the new_master and dead_node. that recovery 2351 * is complete. */ 2352 mlog(0, "%s: dead master %d had reached " 2353 "finalize1 state, clearing\n", dlm->name, idx); 2354 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2355 __dlm_reset_recovery(dlm); 2356 } 2357 } 2358 2359 /* Clean up join state on node death. */ 2360 if (dlm->joining_node == idx) { 2361 mlog(0, "Clearing join state for node %u\n", idx); 2362 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 2363 } 2364 2365 /* check to see if the node is already considered dead */ 2366 if (!test_bit(idx, dlm->live_nodes_map)) { 2367 mlog(0, "for domain %s, node %d is already dead. " 2368 "another node likely did recovery already.\n", 2369 dlm->name, idx); 2370 return; 2371 } 2372 2373 /* check to see if we do not care about this node */ 2374 if (!test_bit(idx, dlm->domain_map)) { 2375 /* This also catches the case that we get a node down 2376 * but haven't joined the domain yet. */ 2377 mlog(0, "node %u already removed from domain!\n", idx); 2378 return; 2379 } 2380 2381 clear_bit(idx, dlm->live_nodes_map); 2382 2383 /* make sure local cleanup occurs before the heartbeat events */ 2384 if (!test_bit(idx, dlm->recovery_map)) 2385 dlm_do_local_recovery_cleanup(dlm, idx); 2386 2387 /* notify anything attached to the heartbeat events */ 2388 dlm_hb_event_notify_attached(dlm, idx, 0); 2389 2390 mlog(0, "node %u being removed from domain map!\n", idx); 2391 clear_bit(idx, dlm->domain_map); 2392 /* wake up migration waiters if a node goes down. 2393 * perhaps later we can genericize this for other waiters. */ 2394 wake_up(&dlm->migration_wq); 2395 2396 if (test_bit(idx, dlm->recovery_map)) 2397 mlog(0, "domain %s, node %u already added " 2398 "to recovery map!\n", dlm->name, idx); 2399 else 2400 set_bit(idx, dlm->recovery_map); 2401 } 2402 2403 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) 2404 { 2405 struct dlm_ctxt *dlm = data; 2406 2407 if (!dlm_grab(dlm)) 2408 return; 2409 2410 /* 2411 * This will notify any dlm users that a node in our domain 2412 * went away without notifying us first. 2413 */ 2414 if (test_bit(idx, dlm->domain_map)) 2415 dlm_fire_domain_eviction_callbacks(dlm, idx); 2416 2417 spin_lock(&dlm->spinlock); 2418 __dlm_hb_node_down(dlm, idx); 2419 spin_unlock(&dlm->spinlock); 2420 2421 dlm_put(dlm); 2422 } 2423 2424 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) 2425 { 2426 struct dlm_ctxt *dlm = data; 2427 2428 if (!dlm_grab(dlm)) 2429 return; 2430 2431 spin_lock(&dlm->spinlock); 2432 set_bit(idx, dlm->live_nodes_map); 2433 /* do NOT notify mle attached to the heartbeat events. 2434 * new nodes are not interesting in mastery until joined. */ 2435 spin_unlock(&dlm->spinlock); 2436 2437 dlm_put(dlm); 2438 } 2439 2440 static void dlm_reco_ast(void *astdata) 2441 { 2442 struct dlm_ctxt *dlm = astdata; 2443 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", 2444 dlm->node_num, dlm->name); 2445 } 2446 static void dlm_reco_bast(void *astdata, int blocked_type) 2447 { 2448 struct dlm_ctxt *dlm = astdata; 2449 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", 2450 dlm->node_num, dlm->name); 2451 } 2452 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) 2453 { 2454 mlog(0, "unlockast for recovery lock fired!\n"); 2455 } 2456 2457 /* 2458 * dlm_pick_recovery_master will continually attempt to use 2459 * dlmlock() on the special "$RECOVERY" lockres with the 2460 * LKM_NOQUEUE flag to get an EX. every thread that enters 2461 * this function on each node racing to become the recovery 2462 * master will not stop attempting this until either: 2463 * a) this node gets the EX (and becomes the recovery master), 2464 * or b) dlm->reco.new_master gets set to some nodenum 2465 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2466 * so each time a recovery master is needed, the entire cluster 2467 * will sync at this point. if the new master dies, that will 2468 * be detected in dlm_do_recovery */ 2469 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) 2470 { 2471 enum dlm_status ret; 2472 struct dlm_lockstatus lksb; 2473 int status = -EINVAL; 2474 2475 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2476 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2477 again: 2478 memset(&lksb, 0, sizeof(lksb)); 2479 2480 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2481 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, 2482 dlm_reco_ast, dlm, dlm_reco_bast); 2483 2484 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", 2485 dlm->name, ret, lksb.status); 2486 2487 if (ret == DLM_NORMAL) { 2488 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2489 dlm->name, dlm->node_num); 2490 2491 /* got the EX lock. check to see if another node 2492 * just became the reco master */ 2493 if (dlm_reco_master_ready(dlm)) { 2494 mlog(0, "%s: got reco EX lock, but %u will " 2495 "do the recovery\n", dlm->name, 2496 dlm->reco.new_master); 2497 status = -EEXIST; 2498 } else { 2499 status = 0; 2500 2501 /* see if recovery was already finished elsewhere */ 2502 spin_lock(&dlm->spinlock); 2503 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2504 status = -EINVAL; 2505 mlog(0, "%s: got reco EX lock, but " 2506 "node got recovered already\n", dlm->name); 2507 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2508 mlog(ML_ERROR, "%s: new master is %u " 2509 "but no dead node!\n", 2510 dlm->name, dlm->reco.new_master); 2511 BUG(); 2512 } 2513 } 2514 spin_unlock(&dlm->spinlock); 2515 } 2516 2517 /* if this node has actually become the recovery master, 2518 * set the master and send the messages to begin recovery */ 2519 if (!status) { 2520 mlog(0, "%s: dead=%u, this=%u, sending " 2521 "begin_reco now\n", dlm->name, 2522 dlm->reco.dead_node, dlm->node_num); 2523 status = dlm_send_begin_reco_message(dlm, 2524 dlm->reco.dead_node); 2525 /* this always succeeds */ 2526 BUG_ON(status); 2527 2528 /* set the new_master to this node */ 2529 spin_lock(&dlm->spinlock); 2530 dlm_set_reco_master(dlm, dlm->node_num); 2531 spin_unlock(&dlm->spinlock); 2532 } 2533 2534 /* recovery lock is a special case. ast will not get fired, 2535 * so just go ahead and unlock it. */ 2536 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); 2537 if (ret == DLM_DENIED) { 2538 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); 2539 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); 2540 } 2541 if (ret != DLM_NORMAL) { 2542 /* this would really suck. this could only happen 2543 * if there was a network error during the unlock 2544 * because of node death. this means the unlock 2545 * is actually "done" and the lock structure is 2546 * even freed. we can continue, but only 2547 * because this specific lock name is special. */ 2548 mlog(ML_ERROR, "dlmunlock returned %d\n", ret); 2549 } 2550 } else if (ret == DLM_NOTQUEUED) { 2551 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2552 dlm->name, dlm->node_num); 2553 /* another node is master. wait on 2554 * reco.new_master != O2NM_INVALID_NODE_NUM 2555 * for at most one second */ 2556 wait_event_timeout(dlm->dlm_reco_thread_wq, 2557 dlm_reco_master_ready(dlm), 2558 msecs_to_jiffies(1000)); 2559 if (!dlm_reco_master_ready(dlm)) { 2560 mlog(0, "%s: reco master taking awhile\n", 2561 dlm->name); 2562 goto again; 2563 } 2564 /* another node has informed this one that it is reco master */ 2565 mlog(0, "%s: reco master %u is ready to recover %u\n", 2566 dlm->name, dlm->reco.new_master, dlm->reco.dead_node); 2567 status = -EEXIST; 2568 } else if (ret == DLM_RECOVERING) { 2569 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", 2570 dlm->name, dlm->node_num); 2571 goto again; 2572 } else { 2573 struct dlm_lock_resource *res; 2574 2575 /* dlmlock returned something other than NOTQUEUED or NORMAL */ 2576 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " 2577 "lksb.status=%s\n", dlm->name, dlm_errname(ret), 2578 dlm_errname(lksb.status)); 2579 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2580 DLM_RECOVERY_LOCK_NAME_LEN); 2581 if (res) { 2582 dlm_print_one_lock_resource(res); 2583 dlm_lockres_put(res); 2584 } else { 2585 mlog(ML_ERROR, "recovery lock not found\n"); 2586 } 2587 BUG(); 2588 } 2589 2590 return status; 2591 } 2592 2593 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) 2594 { 2595 struct dlm_begin_reco br; 2596 int ret = 0; 2597 struct dlm_node_iter iter; 2598 int nodenum; 2599 int status; 2600 2601 mlog_entry("%u\n", dead_node); 2602 2603 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2604 2605 spin_lock(&dlm->spinlock); 2606 dlm_node_iter_init(dlm->domain_map, &iter); 2607 spin_unlock(&dlm->spinlock); 2608 2609 clear_bit(dead_node, iter.node_map); 2610 2611 memset(&br, 0, sizeof(br)); 2612 br.node_idx = dlm->node_num; 2613 br.dead_node = dead_node; 2614 2615 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2616 ret = 0; 2617 if (nodenum == dead_node) { 2618 mlog(0, "not sending begin reco to dead node " 2619 "%u\n", dead_node); 2620 continue; 2621 } 2622 if (nodenum == dlm->node_num) { 2623 mlog(0, "not sending begin reco to self\n"); 2624 continue; 2625 } 2626 retry: 2627 ret = -EINVAL; 2628 mlog(0, "attempting to send begin reco msg to %d\n", 2629 nodenum); 2630 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, 2631 &br, sizeof(br), nodenum, &status); 2632 /* negative status is handled ok by caller here */ 2633 if (ret >= 0) 2634 ret = status; 2635 if (dlm_is_host_down(ret)) { 2636 /* node is down. not involved in recovery 2637 * so just keep going */ 2638 mlog(0, "%s: node %u was down when sending " 2639 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2640 ret = 0; 2641 } 2642 2643 /* 2644 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, 2645 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. 2646 * We are handling both for compatibility reasons. 2647 */ 2648 if (ret == -EAGAIN || ret == EAGAIN) { 2649 mlog(0, "%s: trying to start recovery of node " 2650 "%u, but node %u is waiting for last recovery " 2651 "to complete, backoff for a bit\n", dlm->name, 2652 dead_node, nodenum); 2653 msleep(100); 2654 goto retry; 2655 } 2656 if (ret < 0) { 2657 struct dlm_lock_resource *res; 2658 /* this is now a serious problem, possibly ENOMEM 2659 * in the network stack. must retry */ 2660 mlog_errno(ret); 2661 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2662 " returned %d\n", dlm->name, nodenum, ret); 2663 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2664 DLM_RECOVERY_LOCK_NAME_LEN); 2665 if (res) { 2666 dlm_print_one_lock_resource(res); 2667 dlm_lockres_put(res); 2668 } else { 2669 mlog(ML_ERROR, "recovery lock not found\n"); 2670 } 2671 /* sleep for a bit in hopes that we can avoid 2672 * another ENOMEM */ 2673 msleep(100); 2674 goto retry; 2675 } 2676 } 2677 2678 return ret; 2679 } 2680 2681 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2682 void **ret_data) 2683 { 2684 struct dlm_ctxt *dlm = data; 2685 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; 2686 2687 /* ok to return 0, domain has gone away */ 2688 if (!dlm_grab(dlm)) 2689 return 0; 2690 2691 spin_lock(&dlm->spinlock); 2692 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2693 mlog(0, "%s: node %u wants to recover node %u (%u:%u) " 2694 "but this node is in finalize state, waiting on finalize2\n", 2695 dlm->name, br->node_idx, br->dead_node, 2696 dlm->reco.dead_node, dlm->reco.new_master); 2697 spin_unlock(&dlm->spinlock); 2698 return -EAGAIN; 2699 } 2700 spin_unlock(&dlm->spinlock); 2701 2702 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", 2703 dlm->name, br->node_idx, br->dead_node, 2704 dlm->reco.dead_node, dlm->reco.new_master); 2705 2706 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); 2707 2708 spin_lock(&dlm->spinlock); 2709 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2710 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { 2711 mlog(0, "%s: new_master %u died, changing " 2712 "to %u\n", dlm->name, dlm->reco.new_master, 2713 br->node_idx); 2714 } else { 2715 mlog(0, "%s: new_master %u NOT DEAD, changing " 2716 "to %u\n", dlm->name, dlm->reco.new_master, 2717 br->node_idx); 2718 /* may not have seen the new master as dead yet */ 2719 } 2720 } 2721 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2722 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2723 "node %u changing it to %u\n", dlm->name, 2724 dlm->reco.dead_node, br->node_idx, br->dead_node); 2725 } 2726 dlm_set_reco_master(dlm, br->node_idx); 2727 dlm_set_reco_dead_node(dlm, br->dead_node); 2728 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2729 mlog(0, "recovery master %u sees %u as dead, but this " 2730 "node has not yet. marking %u as dead\n", 2731 br->node_idx, br->dead_node, br->dead_node); 2732 if (!test_bit(br->dead_node, dlm->domain_map) || 2733 !test_bit(br->dead_node, dlm->live_nodes_map)) 2734 mlog(0, "%u not in domain/live_nodes map " 2735 "so setting it in reco map manually\n", 2736 br->dead_node); 2737 /* force the recovery cleanup in __dlm_hb_node_down 2738 * both of these will be cleared in a moment */ 2739 set_bit(br->dead_node, dlm->domain_map); 2740 set_bit(br->dead_node, dlm->live_nodes_map); 2741 __dlm_hb_node_down(dlm, br->dead_node); 2742 } 2743 spin_unlock(&dlm->spinlock); 2744 2745 dlm_kick_recovery_thread(dlm); 2746 2747 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", 2748 dlm->name, br->node_idx, br->dead_node, 2749 dlm->reco.dead_node, dlm->reco.new_master); 2750 2751 dlm_put(dlm); 2752 return 0; 2753 } 2754 2755 #define DLM_FINALIZE_STAGE2 0x01 2756 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) 2757 { 2758 int ret = 0; 2759 struct dlm_finalize_reco fr; 2760 struct dlm_node_iter iter; 2761 int nodenum; 2762 int status; 2763 int stage = 1; 2764 2765 mlog(0, "finishing recovery for node %s:%u, " 2766 "stage %d\n", dlm->name, dlm->reco.dead_node, stage); 2767 2768 spin_lock(&dlm->spinlock); 2769 dlm_node_iter_init(dlm->domain_map, &iter); 2770 spin_unlock(&dlm->spinlock); 2771 2772 stage2: 2773 memset(&fr, 0, sizeof(fr)); 2774 fr.node_idx = dlm->node_num; 2775 fr.dead_node = dlm->reco.dead_node; 2776 if (stage == 2) 2777 fr.flags |= DLM_FINALIZE_STAGE2; 2778 2779 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2780 if (nodenum == dlm->node_num) 2781 continue; 2782 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 2783 &fr, sizeof(fr), nodenum, &status); 2784 if (ret >= 0) 2785 ret = status; 2786 if (ret < 0) { 2787 mlog_errno(ret); 2788 if (dlm_is_host_down(ret)) { 2789 /* this has no effect on this recovery 2790 * session, so set the status to zero to 2791 * finish out the last recovery */ 2792 mlog(ML_ERROR, "node %u went down after this " 2793 "node finished recovery.\n", nodenum); 2794 ret = 0; 2795 continue; 2796 } 2797 break; 2798 } 2799 } 2800 if (stage == 1) { 2801 /* reset the node_iter back to the top and send finalize2 */ 2802 iter.curnode = -1; 2803 stage = 2; 2804 goto stage2; 2805 } 2806 2807 return ret; 2808 } 2809 2810 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, 2811 void **ret_data) 2812 { 2813 struct dlm_ctxt *dlm = data; 2814 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2815 int stage = 1; 2816 2817 /* ok to return 0, domain has gone away */ 2818 if (!dlm_grab(dlm)) 2819 return 0; 2820 2821 if (fr->flags & DLM_FINALIZE_STAGE2) 2822 stage = 2; 2823 2824 mlog(0, "%s: node %u finalizing recovery stage%d of " 2825 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2826 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2827 2828 spin_lock(&dlm->spinlock); 2829 2830 if (dlm->reco.new_master != fr->node_idx) { 2831 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " 2832 "%u is supposed to be the new master, dead=%u\n", 2833 fr->node_idx, dlm->reco.new_master, fr->dead_node); 2834 BUG(); 2835 } 2836 if (dlm->reco.dead_node != fr->dead_node) { 2837 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " 2838 "node %u, but node %u is supposed to be dead\n", 2839 fr->node_idx, fr->dead_node, dlm->reco.dead_node); 2840 BUG(); 2841 } 2842 2843 switch (stage) { 2844 case 1: 2845 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); 2846 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2847 mlog(ML_ERROR, "%s: received finalize1 from " 2848 "new master %u for dead node %u, but " 2849 "this node has already received it!\n", 2850 dlm->name, fr->node_idx, fr->dead_node); 2851 dlm_print_reco_node_status(dlm); 2852 BUG(); 2853 } 2854 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 2855 spin_unlock(&dlm->spinlock); 2856 break; 2857 case 2: 2858 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { 2859 mlog(ML_ERROR, "%s: received finalize2 from " 2860 "new master %u for dead node %u, but " 2861 "this node did not have finalize1!\n", 2862 dlm->name, fr->node_idx, fr->dead_node); 2863 dlm_print_reco_node_status(dlm); 2864 BUG(); 2865 } 2866 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2867 spin_unlock(&dlm->spinlock); 2868 dlm_reset_recovery(dlm); 2869 dlm_kick_recovery_thread(dlm); 2870 break; 2871 default: 2872 BUG(); 2873 } 2874 2875 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", 2876 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); 2877 2878 dlm_put(dlm); 2879 return 0; 2880 } 2881