/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	/* cookies are never zero; wrap back around to 1 */
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot = 0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
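/*
 * A note on the pattern above (a summary of this file's convention, not
 * new behavior): message handlers cannot do their own blocking network
 * sends, so the recovery handlers below package what they need into a
 * dlm_work_item and queue it on dlm->work_list.  dlm_dispatch_work()
 * then runs each item's workfunc from the dlm_worker workqueue, where
 * sleeping and network traffic are fine.  Each item carries its own ref
 * on the dlm (dropped above) so the domain cannot go away while work is
 * still pending.
 */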
/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 *
 * (the per-node states used to track steps 5/6 are sketched below)
 */
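/*
 * A rough sketch of the recovery master's bookkeeping for steps 5 and 6
 * above (this is a reading of dlm_remaster_locks() and the handlers
 * below, not a separately documented protocol): the master keeps one
 * dlm_reco_node_data per live node, whose state normally advances
 *
 *   INIT -> REQUESTING -> REQUESTED -> RECEIVING -> DONE
 *
 * as the lock request is sent, acked, answered with migratable lockres
 * data, and finally capped by a DATA_DONE message.  FINALIZE_SENT marks
 * nodes that have been sent the finalize message, and a node that dies
 * mid-stream is parked in DEAD and skipped for the rest of the pass.
 */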
"ACTIVE" : "inactive", 263 dlm->reco.dead_node, dlm->reco.new_master); 264 265 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 266 char *st = "unknown"; 267 switch (ndata->state) { 268 case DLM_RECO_NODE_DATA_INIT: 269 st = "init"; 270 break; 271 case DLM_RECO_NODE_DATA_REQUESTING: 272 st = "requesting"; 273 break; 274 case DLM_RECO_NODE_DATA_DEAD: 275 st = "dead"; 276 break; 277 case DLM_RECO_NODE_DATA_RECEIVING: 278 st = "receiving"; 279 break; 280 case DLM_RECO_NODE_DATA_REQUESTED: 281 st = "requested"; 282 break; 283 case DLM_RECO_NODE_DATA_DONE: 284 st = "done"; 285 break; 286 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 287 st = "finalize-sent"; 288 break; 289 default: 290 st = "bad"; 291 break; 292 } 293 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", 294 dlm->name, ndata->node_num, st); 295 } 296 list_for_each_entry(res, &dlm->reco.resources, recovering) { 297 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", 298 dlm->name, res->lockname.len, res->lockname.name); 299 } 300 } 301 302 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) 303 304 static int dlm_recovery_thread(void *data) 305 { 306 int status; 307 struct dlm_ctxt *dlm = data; 308 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); 309 310 mlog(0, "dlm thread running for %s...\n", dlm->name); 311 312 while (!kthread_should_stop()) { 313 if (dlm_domain_fully_joined(dlm)) { 314 status = dlm_do_recovery(dlm); 315 if (status == -EAGAIN) { 316 /* do not sleep, recheck immediately. */ 317 continue; 318 } 319 if (status < 0) 320 mlog_errno(status); 321 } 322 323 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 324 kthread_should_stop(), 325 timeout); 326 } 327 328 mlog(0, "quitting DLM recovery thread\n"); 329 return 0; 330 } 331 332 /* returns true when the recovery master has contacted us */ 333 static int dlm_reco_master_ready(struct dlm_ctxt *dlm) 334 { 335 int ready; 336 spin_lock(&dlm->spinlock); 337 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); 338 spin_unlock(&dlm->spinlock); 339 return ready; 340 } 341 342 /* returns true if node is no longer in the domain 343 * could be dead or just not joined */ 344 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) 345 { 346 int dead; 347 spin_lock(&dlm->spinlock); 348 dead = !test_bit(node, dlm->domain_map); 349 spin_unlock(&dlm->spinlock); 350 return dead; 351 } 352 353 /* returns true if node is no longer in the domain 354 * could be dead or just not joined */ 355 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) 356 { 357 int recovered; 358 spin_lock(&dlm->spinlock); 359 recovered = !test_bit(node, dlm->recovery_map); 360 spin_unlock(&dlm->spinlock); 361 return recovered; 362 } 363 364 365 void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 366 { 367 if (dlm_is_node_dead(dlm, node)) 368 return; 369 370 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " 371 "domain %s\n", node, dlm->name); 372 373 if (timeout) 374 wait_event_timeout(dlm->dlm_reco_thread_wq, 375 dlm_is_node_dead(dlm, node), 376 msecs_to_jiffies(timeout)); 377 else 378 wait_event(dlm->dlm_reco_thread_wq, 379 dlm_is_node_dead(dlm, node)); 380 } 381 382 void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 383 { 384 if (dlm_is_node_recovered(dlm, node)) 385 return; 386 387 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " 388 "domain %s\n", node, dlm->name); 389 390 if (timeout) 391 wait_event_timeout(dlm->dlm_reco_thread_wq, 392 dlm_is_node_recovered(dlm, node), 393 
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
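/*
 * dlm_remaster_locks() is the recovery-master side of steps 5 through 9
 * of the overview above: allocate per-node recovery state, ask every
 * live node for the dead node's lock info, wait until each node has
 * either finished sending its data or died, then broadcast the finalize
 * message and claim the dead node's lock resources locally.
 */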
602 "yes" : "no"); 603 } else { 604 /* -ENOMEM on the other node */ 605 mlog(0, "%s: node %u returned " 606 "%d during recovery, retrying " 607 "after a short wait\n", 608 dlm->name, ndata->node_num, 609 status); 610 msleep(100); 611 } 612 } 613 } while (status != 0); 614 615 spin_lock(&dlm_reco_state_lock); 616 switch (ndata->state) { 617 case DLM_RECO_NODE_DATA_INIT: 618 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 619 case DLM_RECO_NODE_DATA_REQUESTED: 620 BUG(); 621 break; 622 case DLM_RECO_NODE_DATA_DEAD: 623 mlog(0, "node %u died after requesting " 624 "recovery info for node %u\n", 625 ndata->node_num, dead_node); 626 /* fine. don't need this node's info. 627 * continue without it. */ 628 break; 629 case DLM_RECO_NODE_DATA_REQUESTING: 630 ndata->state = DLM_RECO_NODE_DATA_REQUESTED; 631 mlog(0, "now receiving recovery data from " 632 "node %u for dead node %u\n", 633 ndata->node_num, dead_node); 634 break; 635 case DLM_RECO_NODE_DATA_RECEIVING: 636 mlog(0, "already receiving recovery data from " 637 "node %u for dead node %u\n", 638 ndata->node_num, dead_node); 639 break; 640 case DLM_RECO_NODE_DATA_DONE: 641 mlog(0, "already DONE receiving recovery data " 642 "from node %u for dead node %u\n", 643 ndata->node_num, dead_node); 644 break; 645 } 646 spin_unlock(&dlm_reco_state_lock); 647 } 648 649 mlog(0, "%s: Done requesting all lock info\n", dlm->name); 650 651 /* nodes should be sending reco data now 652 * just need to wait */ 653 654 while (1) { 655 /* check all the nodes now to see if we are 656 * done, or if anyone died */ 657 all_nodes_done = 1; 658 spin_lock(&dlm_reco_state_lock); 659 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 660 mlog(0, "checking recovery state of node %u\n", 661 ndata->node_num); 662 switch (ndata->state) { 663 case DLM_RECO_NODE_DATA_INIT: 664 case DLM_RECO_NODE_DATA_REQUESTING: 665 mlog(ML_ERROR, "bad ndata state for " 666 "node %u: state=%d\n", 667 ndata->node_num, ndata->state); 668 BUG(); 669 break; 670 case DLM_RECO_NODE_DATA_DEAD: 671 mlog(0, "node %u died after " 672 "requesting recovery info for " 673 "node %u\n", ndata->node_num, 674 dead_node); 675 break; 676 case DLM_RECO_NODE_DATA_RECEIVING: 677 case DLM_RECO_NODE_DATA_REQUESTED: 678 mlog(0, "%s: node %u still in state %s\n", 679 dlm->name, ndata->node_num, 680 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? 681 "receiving" : "requested"); 682 all_nodes_done = 0; 683 break; 684 case DLM_RECO_NODE_DATA_DONE: 685 mlog(0, "%s: node %u state is done\n", 686 dlm->name, ndata->node_num); 687 break; 688 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 689 mlog(0, "%s: node %u state is finalize\n", 690 dlm->name, ndata->node_num); 691 break; 692 } 693 } 694 spin_unlock(&dlm_reco_state_lock); 695 696 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, 697 all_nodes_done?"yes":"no"); 698 if (all_nodes_done) { 699 int ret; 700 701 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state 702 * just send a finalize message to everyone and 703 * clean up */ 704 mlog(0, "all nodes are done! 
send finalize\n"); 705 ret = dlm_send_finalize_reco_message(dlm); 706 if (ret < 0) 707 mlog_errno(ret); 708 709 spin_lock(&dlm->spinlock); 710 dlm_finish_local_lockres_recovery(dlm, dead_node, 711 dlm->node_num); 712 spin_unlock(&dlm->spinlock); 713 mlog(0, "should be done with recovery!\n"); 714 715 mlog(0, "finishing recovery of %s at %lu, " 716 "dead=%u, this=%u, new=%u\n", dlm->name, 717 jiffies, dlm->reco.dead_node, 718 dlm->node_num, dlm->reco.new_master); 719 destroy = 1; 720 status = 0; 721 /* rescan everything marked dirty along the way */ 722 dlm_kick_thread(dlm, NULL); 723 break; 724 } 725 /* wait to be signalled, with periodic timeout 726 * to check for node death */ 727 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 728 kthread_should_stop(), 729 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); 730 731 } 732 733 if (destroy) 734 dlm_destroy_recovery_area(dlm, dead_node); 735 736 return status; 737 } 738 739 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 740 { 741 int num=0; 742 struct dlm_reco_node_data *ndata; 743 744 spin_lock(&dlm->spinlock); 745 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); 746 /* nodes can only be removed (by dying) after dropping 747 * this lock, and death will be trapped later, so this should do */ 748 spin_unlock(&dlm->spinlock); 749 750 while (1) { 751 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); 752 if (num >= O2NM_MAX_NODES) { 753 break; 754 } 755 BUG_ON(num == dead_node); 756 757 ndata = kzalloc(sizeof(*ndata), GFP_NOFS); 758 if (!ndata) { 759 dlm_destroy_recovery_area(dlm, dead_node); 760 return -ENOMEM; 761 } 762 ndata->node_num = num; 763 ndata->state = DLM_RECO_NODE_DATA_INIT; 764 spin_lock(&dlm_reco_state_lock); 765 list_add_tail(&ndata->list, &dlm->reco.node_data); 766 spin_unlock(&dlm_reco_state_lock); 767 num++; 768 } 769 770 return 0; 771 } 772 773 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 774 { 775 struct dlm_reco_node_data *ndata, *next; 776 LIST_HEAD(tmplist); 777 778 spin_lock(&dlm_reco_state_lock); 779 list_splice_init(&dlm->reco.node_data, &tmplist); 780 spin_unlock(&dlm_reco_state_lock); 781 782 list_for_each_entry_safe(ndata, next, &tmplist, list) { 783 list_del_init(&ndata->list); 784 kfree(ndata); 785 } 786 } 787 788 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, 789 u8 dead_node) 790 { 791 struct dlm_lock_request lr; 792 enum dlm_status ret; 793 794 mlog(0, "\n"); 795 796 797 mlog(0, "dlm_request_all_locks: dead node is %u, sending request " 798 "to %u\n", dead_node, request_from); 799 800 memset(&lr, 0, sizeof(lr)); 801 lr.node_idx = dlm->node_num; 802 lr.dead_node = dead_node; 803 804 // send message 805 ret = DLM_NOLOCKMGR; 806 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, 807 &lr, sizeof(lr), request_from, NULL); 808 809 /* negative status is handled by caller */ 810 if (ret < 0) 811 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " 812 "to recover dead node %u\n", dlm->name, ret, 813 request_from, dead_node); 814 // return from here, then 815 // sleep until all received or error 816 return ret; 817 818 } 819 820 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, 821 void **ret_data) 822 { 823 struct dlm_ctxt *dlm = data; 824 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; 825 char *buf = NULL; 826 struct dlm_work_item *item = NULL; 827 828 if (!dlm_grab(dlm)) 829 return -EINVAL; 830 831 if (lr->dead_node != dlm->reco.dead_node) { 
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
		/* should have moved beyond INIT but not to FINALIZE yet */
		case DLM_RECO_NODE_DATA_INIT:
		case DLM_RECO_NODE_DATA_DEAD:
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			mlog(ML_ERROR, "bad ndata state for node %u:"
			     " state=%d\n", ndata->node_num,
			     ndata->state);
			BUG();
			break;
		/* these states are possible at this point, anywhere along
		 * the line of recovery */
		case DLM_RECO_NODE_DATA_DONE:
		case DLM_RECO_NODE_DATA_RECEIVING:
		case DLM_RECO_NODE_DATA_REQUESTED:
		case DLM_RECO_NODE_DATA_REQUESTING:
			mlog(0, "node %u is DONE sending "
			     "recovery data!\n",
			     ndata->node_num);

			ndata->state = DLM_RECO_NODE_DATA_DONE;
			ret = 0;
			break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);
	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	/* walks the granted, converting and blocked queues in turn;
	 * see the layout note before dlm_list_num_to_pointer() below */
	for (i = 0; i < 3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}

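/*
 * Wire-format notes for the migration helpers below (as inferred from
 * this file, not a separate spec): a dlm_migratable_lockres occupies a
 * single page and holds at most DLM_MAX_MIGRATABLE_LOCKS entries.  If a
 * lockres has more locks than fit in one message, the whole series is
 * tagged with one mig_cookie from dlm_get_next_mig_cookie(), and
 * DLM_MRES_ALL_DONE is set on the message carrying the final lock so
 * the receiver can tell when the lockres is complete.
 */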
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
		     ml->type == LKM_IVMODE &&
		     ml->convert_type == LKM_IVMODE &&
		     ml->highest_blocked == LKM_IVMODE &&
		     ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}


/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res;	/* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}
" 1538 "this node will take it.\n", 1539 res->lockname.len, res->lockname.name); 1540 } else { 1541 spin_lock(&res->spinlock); 1542 dlm_lockres_drop_inflight_ref(dlm, res); 1543 spin_unlock(&res->spinlock); 1544 mlog(0, "master needs to respond to sender " 1545 "that node %u still owns %.*s\n", 1546 real_master, res->lockname.len, 1547 res->lockname.name); 1548 /* cannot touch this lockres */ 1549 goto leave; 1550 } 1551 } 1552 1553 ret = dlm_process_recovery_data(dlm, res, mres); 1554 if (ret < 0) 1555 mlog(0, "dlm_process_recovery_data returned %d\n", ret); 1556 else 1557 mlog(0, "dlm_process_recovery_data succeeded\n"); 1558 1559 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == 1560 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { 1561 ret = dlm_finish_migration(dlm, res, mres->master); 1562 if (ret < 0) 1563 mlog_errno(ret); 1564 } 1565 1566 leave: 1567 /* See comment in dlm_mig_lockres_handler() */ 1568 if (res) { 1569 if (extra_ref) 1570 dlm_lockres_put(res); 1571 dlm_lockres_put(res); 1572 } 1573 kfree(data); 1574 } 1575 1576 1577 1578 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 1579 struct dlm_lock_resource *res, 1580 u8 *real_master) 1581 { 1582 struct dlm_node_iter iter; 1583 int nodenum; 1584 int ret = 0; 1585 1586 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; 1587 1588 /* we only reach here if one of the two nodes in a 1589 * migration died while the migration was in progress. 1590 * at this point we need to requery the master. we 1591 * know that the new_master got as far as creating 1592 * an mle on at least one node, but we do not know 1593 * if any nodes had actually cleared the mle and set 1594 * the master to the new_master. the old master 1595 * is supposed to set the owner to UNKNOWN in the 1596 * event of a new_master death, so the only possible 1597 * responses that we can get from nodes here are 1598 * that the master is new_master, or that the master 1599 * is UNKNOWN. 1600 * if all nodes come back with UNKNOWN then we know 1601 * the lock needs remastering here. 1602 * if any node comes back with a valid master, check 1603 * to see if that master is the one that we are 1604 * recovering. if so, then the new_master died and 1605 * we need to remaster this lock. if not, then the 1606 * new_master survived and that node will respond to 1607 * other nodes about the owner. 1608 * if there is an owner, this node needs to dump this 1609 * lockres and alert the sender that this lockres 1610 * was rejected. */ 1611 spin_lock(&dlm->spinlock); 1612 dlm_node_iter_init(dlm->domain_map, &iter); 1613 spin_unlock(&dlm->spinlock); 1614 1615 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 1616 /* do not send to self */ 1617 if (nodenum == dlm->node_num) 1618 continue; 1619 ret = dlm_do_master_requery(dlm, res, nodenum, real_master); 1620 if (ret < 0) { 1621 mlog_errno(ret); 1622 if (!dlm_is_host_down(ret)) 1623 BUG(); 1624 /* host is down, so answer for that node would be 1625 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. 

int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
		     dlm->key, nodenum);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}


/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		} else /* put.. in case we are not the master */
			dlm_lockres_put(res);
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

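/*
 * Map a wire-format list index back to the corresponding queue.  Like
 * dlm_num_locks_in_lockres() above, this relies on the granted,
 * converting and blocked list_heads sitting next to each other in
 * struct dlm_lock_resource, in DLM_GRANTED_LIST..DLM_BLOCKED_LIST order.
 */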
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i = 0; i < mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each_entry(lock, tmpq, list) {
					if (lock->ml.cookie != ml->cookie)
						lock = NULL;
					else
						break;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
				     "with cookie %u:%llu, node %u, "
				     "list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each_entry(lock, tmpq, list) {
					if (lock->ml.cookie != ml->cookie)
						lock = NULL;
					else
						break;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
				     "with cookie %u:%llu, node %u, "
				     "list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 * 1. order of locks on granted queue is
		 *    meaningless.
		 * 2. order of locks on converting queue is
		 *    LOST with the node death.  sorry charlie.
		 * 3. order of locks on the blocked queue is
		 *    also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
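		/* Guard against adding the same lock twice: a duplicate
		 * cookie on the target queue most likely means this
		 * migratable lockres message is a resend that was already
		 * applied, so the lock is logged and skipped below instead
		 * of being added a second time. */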
type=%d\n", dlm->name, 1911 res->lockname.len, 1912 res->lockname.name, ml->type); 1913 printk("lockres lvb=["); 1914 for (i=0; i<DLM_LVB_LEN; i++) 1915 printk("%02x", res->lvb[i]); 1916 printk("]\nmigrated lvb=["); 1917 for (i=0; i<DLM_LVB_LEN; i++) 1918 printk("%02x", mres->lvb[i]); 1919 printk("]\n"); 1920 dlm_print_one_lock_resource(res); 1921 BUG(); 1922 } 1923 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1924 } 1925 } 1926 skip_lvb: 1927 1928 /* NOTE: 1929 * wrt lock queue ordering and recovery: 1930 * 1. order of locks on granted queue is 1931 * meaningless. 1932 * 2. order of locks on converting queue is 1933 * LOST with the node death. sorry charlie. 1934 * 3. order of locks on the blocked queue is 1935 * also LOST. 1936 * order of locks does not affect integrity, it 1937 * just means that a lock request may get pushed 1938 * back in line as a result of the node death. 1939 * also note that for a given node the lock order 1940 * for its secondary queue locks is preserved 1941 * relative to each other, but clearly *not* 1942 * preserved relative to locks from other nodes. 1943 */ 1944 bad = 0; 1945 spin_lock(&res->spinlock); 1946 list_for_each_entry(lock, queue, list) { 1947 if (lock->ml.cookie == ml->cookie) { 1948 c = lock->ml.cookie; 1949 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1950 "exists on this lockres!\n", dlm->name, 1951 res->lockname.len, res->lockname.name, 1952 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1953 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 1954 1955 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 1956 "node=%u, cookie=%u:%llu, queue=%d\n", 1957 ml->type, ml->convert_type, ml->node, 1958 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), 1959 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), 1960 ml->list); 1961 1962 __dlm_print_one_lock_resource(res); 1963 bad = 1; 1964 break; 1965 } 1966 } 1967 if (!bad) { 1968 dlm_lock_get(newlock); 1969 list_add_tail(&newlock->list, queue); 1970 mlog(0, "%s:%.*s: added lock for node %u, " 1971 "setting refmap bit\n", dlm->name, 1972 res->lockname.len, res->lockname.name, ml->node); 1973 dlm_lockres_set_refmap_bit(dlm, res, ml->node); 1974 added++; 1975 } 1976 spin_unlock(&res->spinlock); 1977 } 1978 mlog(0, "done running all the locks\n"); 1979 1980 leave: 1981 /* balance the ref taken when the work was queued */ 1982 spin_lock(&res->spinlock); 1983 dlm_lockres_drop_inflight_ref(dlm, res); 1984 spin_unlock(&res->spinlock); 1985 1986 if (ret < 0) { 1987 mlog_errno(ret); 1988 if (newlock) 1989 dlm_lock_put(newlock); 1990 } 1991 1992 return ret; 1993 } 1994 1995 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, 1996 struct dlm_lock_resource *res) 1997 { 1998 int i; 1999 struct list_head *queue; 2000 struct dlm_lock *lock, *next; 2001 2002 assert_spin_locked(&dlm->spinlock); 2003 assert_spin_locked(&res->spinlock); 2004 res->state |= DLM_LOCK_RES_RECOVERING; 2005 if (!list_empty(&res->recovering)) { 2006 mlog(0, 2007 "Recovering res %s:%.*s, is already on recovery list!\n", 2008 dlm->name, res->lockname.len, res->lockname.name); 2009 list_del_init(&res->recovering); 2010 dlm_lockres_put(res); 2011 } 2012 /* We need to hold a reference while on the recovery list */ 2013 dlm_lockres_get(res); 2014 list_add_tail(&res->recovering, &dlm->reco.resources); 2015 2016 /* find any pending locks and put them back on proper list */ 2017 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { 2018 queue = dlm_list_idx_to_ptr(res, i); 2019 list_for_each_entry_safe(lock, next, queue, list) { 2020 dlm_lock_get(lock); 2021 if 
	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from granted list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}

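
/* A lockres holds one extra reference for as long as it sits on
 * dlm->reco.resources: dlm_move_lockres_to_recovery_list() takes it
 * with dlm_lockres_get(), and dlm_finish_local_lockres_recovery()
 * below drops it when the resource comes off the list. */
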
/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERING flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}

static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

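/* Decide whose locks can still vouch for the lvb.  If this node
 * masters the lockres, the lvb only goes stale if the dead node held
 * it in EX; on a secondary lockres the lvb stays trustworthy only
 * while this node itself holds an EX or PR lock. */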
EX\n", 2192 res->lockname.len, res->lockname.name, dead_node); 2193 memset(res->lvb, 0, DLM_LVB_LEN); 2194 } 2195 } 2196 2197 static void dlm_free_dead_locks(struct dlm_ctxt *dlm, 2198 struct dlm_lock_resource *res, u8 dead_node) 2199 { 2200 struct dlm_lock *lock, *next; 2201 unsigned int freed = 0; 2202 2203 /* this node is the lockres master: 2204 * 1) remove any stale locks for the dead node 2205 * 2) if the dead node had an EX when he died, blank out the lvb 2206 */ 2207 assert_spin_locked(&dlm->spinlock); 2208 assert_spin_locked(&res->spinlock); 2209 2210 /* We do two dlm_lock_put(). One for removing from list and the other is 2211 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ 2212 2213 /* TODO: check pending_asts, pending_basts here */ 2214 list_for_each_entry_safe(lock, next, &res->granted, list) { 2215 if (lock->ml.node == dead_node) { 2216 list_del_init(&lock->list); 2217 dlm_lock_put(lock); 2218 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2219 dlm_lock_put(lock); 2220 freed++; 2221 } 2222 } 2223 list_for_each_entry_safe(lock, next, &res->converting, list) { 2224 if (lock->ml.node == dead_node) { 2225 list_del_init(&lock->list); 2226 dlm_lock_put(lock); 2227 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2228 dlm_lock_put(lock); 2229 freed++; 2230 } 2231 } 2232 list_for_each_entry_safe(lock, next, &res->blocked, list) { 2233 if (lock->ml.node == dead_node) { 2234 list_del_init(&lock->list); 2235 dlm_lock_put(lock); 2236 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ 2237 dlm_lock_put(lock); 2238 freed++; 2239 } 2240 } 2241 2242 if (freed) { 2243 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2244 "dropping ref from lockres\n", dlm->name, 2245 res->lockname.len, res->lockname.name, freed, dead_node); 2246 if(!test_bit(dead_node, res->refmap)) { 2247 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " 2248 "but ref was not set\n", dlm->name, 2249 res->lockname.len, res->lockname.name, freed, dead_node); 2250 __dlm_print_one_lock_resource(res); 2251 } 2252 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2253 } else if (test_bit(dead_node, res->refmap)) { 2254 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2255 "no locks and had not purged before dying\n", dlm->name, 2256 res->lockname.len, res->lockname.name, dead_node); 2257 dlm_lockres_clear_refmap_bit(dlm, res, dead_node); 2258 } 2259 2260 /* do not kick thread yet */ 2261 __dlm_dirty_lockres(dlm, res); 2262 } 2263 2264 /* if this node is the recovery master, and there are no 2265 * locks for a given lockres owned by this node that are in 2266 * either PR or EX mode, zero out the lvb before requesting. 2267 * 2268 */ 2269 2270 2271 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2272 { 2273 struct dlm_lock_resource *res; 2274 int i; 2275 struct hlist_head *bucket; 2276 struct dlm_lock *lock; 2277 2278 2279 /* purge any stale mles */ 2280 dlm_clean_master_list(dlm, dead_node); 2281 2282 /* 2283 * now clean up all lock resources. there are two rules: 2284 * 2285 * 1) if the dead node was the master, move the lockres 2286 * to the recovering list. set the RECOVERING flag. 2287 * this lockres needs to be cleaned up before it can 2288 * be used further. 2289 * 2290 * 2) if this node was the master, remove all locks from 2291 * each of the lockres queues that were owned by the 2292 * dead node. once recovery finishes, the dlm thread 2293 * can be kicked again to see if any ASTs or BASTs 2294 * need to be fired as a result. 
/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */


static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;


	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(ML_NOTICE, "%s: res %.*s, Skip "
					     "recovery as it is being freed\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name);
				} else
					dlm_move_lockres_to_recovery_list(dlm,
									  res);

			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}

}
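
/* Core node-death bookkeeping, called with dlm->spinlock held: reset
 * recovery state if the dead node was a recovery master that had
 * reached finalize1, clear any join state, drop the node from the
 * live and domain maps, run the local lockres cleanup, and finally
 * flag the node in the recovery map. */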
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}
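
/* The three handlers above are intentionally (nearly) empty: the
 * $RECOVERY lock only arbitrates which node becomes recovery master
 * (see dlm_pick_recovery_master below), so beyond logging there is
 * nothing to do when its asts fire. */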
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
							     dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL,
					dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck.  this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
		 * We are handling both for compatibility reasons.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
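
/* Handler for DLM_BEGIN_RECO_MSG.  Records the sender as the new
 * recovery master and br->dead_node as the node being recovered.  If
 * this node is still in the finalize stage of a previous recovery it
 * answers -EAGAIN, and the master backs off and retries. */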
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down;
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
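
/* Recovery is finalized in two stages so every node observes the same
 * ordering: finalize1 installs the new owners and sets
 * DLM_RECO_STATE_FINALIZE on each node, finalize2 clears that state
 * and lets the next recovery begin.  The same message type carries
 * both; stage 2 is flagged with DLM_FINALIZE_STAGE2. */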
#define DLM_FINALIZE_STAGE2 0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
			     dlm->key, nodenum);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}

int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}
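
	/* stage 1: adopt the new master locally and enter the finalize
	 * state; stage 2: leave the finalize state and reset recovery.
	 * receiving either stage out of order is fatal. */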
	switch (stage) {
	case 1:
		dlm_finish_local_lockres_recovery(dlm, fr->dead_node,
						  fr->node_idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			mlog(ML_ERROR, "%s: received finalize1 from "
			     "new master %u for dead node %u, but "
			     "this node has already received it!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		break;
	case 2:
		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
			mlog(ML_ERROR, "%s: received finalize2 from "
			     "new master %u for dead node %u, but "
			     "this node did not have finalize1!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		dlm_reset_recovery(dlm);
		dlm_kick_recovery_thread(dlm);
		break;
	default:
		BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, "
	     "master now %u\n", dlm->name, fr->node_idx,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}