/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped indicating a node has failed (perhaps
 * the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result.  A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure.  This should only be called
 * by the dlm_recoverd thread.
 */

static void dlm_wait_timer_fn(unsigned long data)
{
        struct dlm_ls *ls = (struct dlm_ls *) data;
        mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
        wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
        int error = 0;

        init_timer(&ls->ls_timer);
        ls->ls_timer.function = dlm_wait_timer_fn;
        ls->ls_timer.data = (long) ls;
        ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
        add_timer(&ls->ls_timer);

        wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
        del_timer_sync(&ls->ls_timer);

        if (dlm_recovery_stopped(ls)) {
                log_debug(ls, "dlm_wait_function aborted");
                error = -EINTR;
        }
        return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */
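/*
 * A note on the flag pairing (a sketch; the authoritative values live in
 * dlm_internal.h): wait_status() below computes X_ALL as X << 1, so each
 * DLM_RS_X_ALL flag is assumed to sit in the bit directly above DLM_RS_X,
 * e.g. DLM_RS_DIR_ALL == DLM_RS_DIR << 1 and
 * DLM_RS_LOCKS_ALL == DLM_RS_LOCKS << 1.
 */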
uint32_t dlm_recover_status(struct dlm_ls *ls)
{
        uint32_t status;
        spin_lock(&ls->ls_recover_lock);
        status = ls->ls_recover_status;
        spin_unlock(&ls->ls_recover_lock);
        return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        spin_lock(&ls->ls_recover_lock);
        _set_recover_status(ls, status);
        spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
                           int save_slots)
{
        struct dlm_rcom *rc = ls->ls_recover_buf;
        struct dlm_member *memb;
        int error = 0, delay;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                delay = 0;
                for (;;) {
                        if (dlm_recovery_stopped(ls)) {
                                error = -EINTR;
                                goto out;
                        }

                        error = dlm_rcom_status(ls, memb->nodeid, 0);
                        if (error)
                                goto out;

                        if (save_slots)
                                dlm_slot_save(ls, rc, memb);

                        if (rc->rc_result & wait_status)
                                break;
                        if (delay < 1000)
                                delay += 20;
                        msleep(delay);
                }
        }
 out:
        return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
                           uint32_t status_flags)
{
        struct dlm_rcom *rc = ls->ls_recover_buf;
        int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

        for (;;) {
                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        goto out;
                }

                error = dlm_rcom_status(ls, nodeid, status_flags);
                if (error)
                        break;

                if (rc->rc_result & wait_status)
                        break;
                if (delay < 1000)
                        delay += 20;
                msleep(delay);
        }
 out:
        return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
        uint32_t status_all = status << 1;
        int error;

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, status, 0);
                if (!error)
                        dlm_set_recover_status(ls, status_all);
        } else
                error = wait_status_low(ls, status_all, 0);

        return error;
}

int dlm_recover_members_wait(struct dlm_ls *ls)
{
        struct dlm_member *memb;
        struct dlm_slot *slots;
        int num_slots, slots_size;
        int error, rv;
        uint32_t gen;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                memb->slot = -1;
                memb->generation = 0;
        }

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, DLM_RS_NODES, 1);
                if (error)
                        goto out;

                /* slots array is sparse, slots_size may be > num_slots */

                rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
                if (!rv) {
                        spin_lock(&ls->ls_recover_lock);
                        _set_recover_status(ls, DLM_RS_NODES_ALL);
                        ls->ls_num_slots = num_slots;
                        ls->ls_slots_size = slots_size;
                        ls->ls_slots = slots;
                        ls->ls_generation = gen;
                        spin_unlock(&ls->ls_recover_lock);
                } else {
                        dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
                }
        } else {
                error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
                if (error)
                        goto out;

                dlm_slots_copy_in(ls);
        }
 out:
        return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */
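/*
 * Typical lifecycle of a recover_list entry (a sketch of the flow in this
 * file): recover_master() calls recover_list_add(r) and sends a lookup
 * rcom stamped with the address of r as rc_id; dlm_recover_master_reply()
 * maps rc_id back to the rsb with recover_list_find(), applies the result,
 * and calls recover_list_del(r).  When the last entry is removed,
 * ls_wait_general is woken so dlm_wait_function() can return.
 */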
static int recover_list_empty(struct dlm_ls *ls)
{
        int empty;

        spin_lock(&ls->ls_recover_list_lock);
        empty = list_empty(&ls->ls_recover_list);
        spin_unlock(&ls->ls_recover_list_lock);

        return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        if (list_empty(&r->res_recover_list)) {
                list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
                ls->ls_recover_list_count++;
                dlm_hold_rsb(r);
        }
        spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        list_del_init(&r->res_recover_list);
        ls->ls_recover_list_count--;
        spin_unlock(&ls->ls_recover_list_lock);

        dlm_put_rsb(r);
}

static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
{
        struct dlm_rsb *r = NULL;

        spin_lock(&ls->ls_recover_list_lock);

        list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
                if (id == (unsigned long) r)
                        goto out;
        }
        r = NULL;
 out:
        spin_unlock(&ls->ls_recover_list_lock);
        return r;
}

static void recover_list_clear(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *s;

        spin_lock(&ls->ls_recover_list_lock);
        list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
                list_del_init(&r->res_recover_list);
                r->res_recover_locks_count = 0;
                dlm_put_rsb(r);
                ls->ls_recover_list_count--;
        }

        if (ls->ls_recover_list_count != 0) {
                log_error(ls, "warning: recover_list_count %d",
                          ls->ls_recover_list_count);
                ls->ls_recover_list_count = 0;
        }
        spin_unlock(&ls->ls_recover_list_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/
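/*
 * Terminology note (as generally used in the dlm, stated here for
 * reference): a MSTCPY ("master copy") lkb is the master node's copy of a
 * lock whose process copy is held on another node; the DLM_IFL_MSTCPY
 * flag below marks such lkb's.
 */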
/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
        struct dlm_lkb *lkb;

        list_for_each_entry(lkb, queue, lkb_statequeue) {
                if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
                        lkb->lkb_nodeid = nodeid;
                        lkb->lkb_remid = 0;
                }
        }
}

static void set_master_lkbs(struct dlm_rsb *r)
{
        set_lock_master(&r->res_grantqueue, r->res_nodeid);
        set_lock_master(&r->res_convertqueue, r->res_nodeid);
        set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r, int nodeid)
{
        r->res_nodeid = nodeid;
        set_master_lkbs(r);
        rsb_set_flag(r, RSB_NEW_MASTER);
        rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 */

static int recover_master(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;
        int error, ret_nodeid;
        int our_nodeid = dlm_our_nodeid();
        int dir_nodeid = dlm_dir_nodeid(r);

        if (dir_nodeid == our_nodeid) {
                error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
                                       r->res_length, &ret_nodeid);
                if (error)
                        log_error(ls, "recover dir lookup error %d", error);

                if (ret_nodeid == our_nodeid)
                        ret_nodeid = 0;
                lock_rsb(r);
                set_new_master(r, ret_nodeid);
                unlock_rsb(r);
        } else {
                recover_list_add(r);
                error = dlm_send_rcom_lookup(r, dir_nodeid);
        }

        return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved
 * in the requestqueue cannot know whether it should ignore the reply and
 * resend the request, or accept the reply and complete the request.  It must
 * do the former if the remote node purged MSTCPY locks, and the latter if it
 * did not.  This is solved by always purging MSTCPY locks, in which case the
 * request reply would always be ignored and the request resent.
 */

static int recover_master_static(struct dlm_rsb *r)
{
        int dir_nodeid = dlm_dir_nodeid(r);
        int new_master = dir_nodeid;

        if (dir_nodeid == dlm_our_nodeid())
                new_master = 0;

        lock_rsb(r);
        dlm_purge_mstcpy_locks(r);
        set_new_master(r, new_master);
        unlock_rsb(r);
        return 1;
}
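/*
 * A concrete instance of the scenario described above (a sketch): node A
 * has a request to master B outstanding and saves B's reply in its
 * requestqueue while recovery 1 is running; recovery 1 aborts, and
 * recovery 2 picks B as master again.  A cannot tell whether B purged the
 * MSTCPY lock during the aborted recovery, but because B always purges,
 * A can always discard the saved reply and resend the request.
 */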
/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error = 0, count = 0;

        log_debug(ls, "dlm_recover_masters");

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (dlm_recovery_stopped(ls)) {
                        up_read(&ls->ls_root_sem);
                        error = -EINTR;
                        goto out;
                }

                if (dlm_no_directory(ls))
                        count += recover_master_static(r);
                else if (!is_master(r) &&
                         (dlm_is_removed(ls, r->res_nodeid) ||
                          rsb_flag(r, RSB_NEW_MASTER))) {
                        recover_master(r);
                        count++;
                }

                schedule();
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_masters %d resources", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
        struct dlm_rsb *r;
        int nodeid;

        r = recover_list_find(ls, rc->rc_id);
        if (!r) {
                log_error(ls, "dlm_recover_master_reply no id %llx",
                          (unsigned long long)rc->rc_id);
                goto out;
        }

        nodeid = rc->rc_result;
        if (nodeid == dlm_our_nodeid())
                nodeid = 0;

        lock_rsb(r);
        set_new_master(r, nodeid);
        unlock_rsb(r);
        recover_list_del(r);

        if (recover_list_empty(ls))
                wake_up(&ls->ls_wait_general);
 out:
        return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/


/*
 * Keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done.
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
        struct dlm_lkb *lkb;
        int error = 0;

        list_for_each_entry(lkb, head, lkb_statequeue) {
                error = dlm_send_rcom_lock(r, lkb);
                if (error)
                        break;
                r->res_recover_locks_count++;
        }

        return error;
}

static int recover_locks(struct dlm_rsb *r)
{
        int error = 0;

        lock_rsb(r);

        DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

        error = recover_locks_queue(r, &r->res_grantqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_convertqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_waitqueue);
        if (error)
                goto out;

        if (r->res_recover_locks_count)
                recover_list_add(r);
        else
                rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
        unlock_rsb(r);
        return error;
}
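/*
 * Note on the bookkeeping above: recover_locks() leaves the rsb on the
 * recover_list with res_recover_locks_count equal to the number of lkb's
 * sent; dlm_recovered_lock() below runs once per reply, decrementing the
 * count and taking the rsb off the list when it reaches zero.
 */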
int dlm_recover_locks(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error, count = 0;

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (is_master(r)) {
                        rsb_clear_flag(r, RSB_NEW_MASTER);
                        continue;
                }

                if (!rsb_flag(r, RSB_NEW_MASTER))
                        continue;

                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                error = recover_locks(r);
                if (error) {
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                count += r->res_recover_locks_count;
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_locks %d out", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
        DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

        r->res_recover_locks_count--;
        if (!r->res_recover_locks_count) {
                rsb_clear_flag(r, RSB_NEW_MASTER);
                recover_list_del(r);
        }

        if (recover_list_empty(r->res_ls))
                wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb, *high_lkb = NULL;
        uint32_t high_seq = 0;
        int lock_lvb_exists = 0;
        int big_lock_exists = 0;
        int lvblen = r->res_ls->ls_lvblen;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

 setflag:
        if (!lock_lvb_exists)
                goto out;

        if (!big_lock_exists)
                rsb_set_flag(r, RSB_VALNOTVALID);

        /* don't mess with the lvb unless we're the new master */
        if (!rsb_flag(r, RSB_NEW_MASTER2))
                goto out;

        if (!r->res_lvbptr) {
                r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
                if (!r->res_lvbptr)
                        goto out;
        }

        if (big_lock_exists) {
                r->res_lvbseq = lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
        } else if (high_lkb) {
                r->res_lvbseq = high_lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
        } else {
                r->res_lvbseq = 0;
                memset(r->res_lvbptr, 0, lvblen);
        }
 out:
        return;
}
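/*
 * Note on the lvbseq comparison in recover_lvb() above: the signed
 * difference ((int)lkb_lvbseq - (int)high_seq) >= 0 is serial-number
 * arithmetic, so the "largest" sequence is still picked correctly if the
 * 32-bit counter has wrapped (e.g. 0x00000001 counts as newer than
 * 0xffffffff).
 */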
/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb;
        int grmode = -1;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (lkb->lkb_grmode == DLM_LOCK_PR ||
                    lkb->lkb_grmode == DLM_LOCK_CW) {
                        grmode = lkb->lkb_grmode;
                        break;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (lkb->lkb_grmode != DLM_LOCK_IV)
                        continue;
                if (grmode == -1)
                        lkb->lkb_grmode = lkb->lkb_rqmode;
                else
                        lkb->lkb_grmode = grmode;
        }
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
        if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
                rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        unsigned int count = 0;

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                lock_rsb(r);
                if (is_master(r)) {
                        if (rsb_flag(r, RSB_RECOVER_CONVERT))
                                recover_conversion(r);
                        if (rsb_flag(r, RSB_NEW_MASTER2))
                                recover_grant(r);
                        recover_lvb(r);
                        count++;
                }
                rsb_clear_flag(r, RSB_RECOVER_CONVERT);
                rsb_clear_flag(r, RSB_NEW_MASTER2);
                unlock_rsb(r);
        }
        up_read(&ls->ls_root_sem);

        if (count)
                log_debug(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
        struct rb_node *n;
        struct dlm_rsb *r;
        int i, error = 0;

        down_write(&ls->ls_root_sem);
        if (!list_empty(&ls->ls_root_list)) {
                log_error(ls, "root list not empty");
                error = -EINVAL;
                goto out;
        }

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }

                /* If we're using a directory, add tossed rsbs to the root
                   list; they'll have entries created in the new directory,
                   but no other recovery steps should do anything with them. */

                if (dlm_no_directory(ls)) {
                        spin_unlock(&ls->ls_rsbtbl[i].lock);
                        continue;
                }

                for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) {
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }
 out:
        up_write(&ls->ls_root_sem);
        return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *safe;

        down_write(&ls->ls_root_sem);
        list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
                list_del_init(&r->res_root_list);
                dlm_put_rsb(r);
        }
        up_write(&ls->ls_root_sem);
}
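/*
 * Note: dlm_create_root_list() takes a reference on every rsb it adds
 * (dlm_hold_rsb) and dlm_release_root_list() drops it (dlm_put_rsb), so
 * an rsb can't be freed out from under the recovery steps that walk
 * ls_root_list in between.
 */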
/* If not using a directory, clear the entire toss list; there's no benefit to
   caching the master value since it's fixed.  If we are using a dir, keep the
   rsb's we're the master of.  Recovery will add them to the root list and
   from there they'll be entered in the rebuilt directory. */

void dlm_clear_toss_list(struct dlm_ls *ls)
{
        struct rb_node *n, *next;
        struct dlm_rsb *rsb;
        int i;

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
                        next = rb_next(n);
                        rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
                        if (dlm_no_directory(ls) || !is_master(rsb)) {
                                rb_erase(n, &ls->ls_rsbtbl[i].toss);
                                dlm_free_rsb(rsb);
                        }
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }
}