// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2011 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "recover.h"
#include "rcom.h"
#include "config.h"
#include "midcomms.h"
#include "lowcomms.h"

int dlm_slots_version(struct dlm_header *h)
{
	if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS)
		return 0;
	return 1;
}

void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
		   struct dlm_member *memb)
{
	struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;

	if (!dlm_slots_version(&rc->rc_header))
		return;

	memb->slot = le16_to_cpu(rf->rf_our_slot);
	memb->generation = le32_to_cpu(rf->rf_generation);
}

void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_slot *slot;
	struct rcom_slot *ro;
	int i;

	ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

	/* ls_slots array is sparse, but not rcom_slots */

	for (i = 0; i < ls->ls_slots_size; i++) {
		slot = &ls->ls_slots[i];
		if (!slot->nodeid)
			continue;
		ro->ro_nodeid = cpu_to_le32(slot->nodeid);
		ro->ro_slot = cpu_to_le16(slot->slot);
		ro++;
	}
}

#define SLOT_DEBUG_LINE 128

static void log_slots(struct dlm_ls *ls, uint32_t gen, int num_slots,
		      struct rcom_slot *ro0, struct dlm_slot *array,
		      int array_size)
{
	char line[SLOT_DEBUG_LINE];
	int len = SLOT_DEBUG_LINE - 1;
	int pos = 0;
	int ret, i;

	memset(line, 0, sizeof(line));

	if (array) {
		for (i = 0; i < array_size; i++) {
			if (!array[i].nodeid)
				continue;

			ret = snprintf(line + pos, len - pos, " %d:%d",
				       array[i].slot, array[i].nodeid);
			if (ret >= len - pos)
				break;
			pos += ret;
		}
	} else if (ro0) {
		for (i = 0; i < num_slots; i++) {
			ret = snprintf(line + pos, len - pos, " %d:%d",
				       ro0[i].ro_slot, ro0[i].ro_nodeid);
			if (ret >= len - pos)
				break;
			pos += ret;
		}
	}

	log_rinfo(ls, "generation %u slots %d%s", gen, num_slots, line);
}

int dlm_slots_copy_in(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
	struct rcom_slot *ro0, *ro;
	int our_nodeid = dlm_our_nodeid();
	int i, num_slots;
	uint32_t gen;

	if (!dlm_slots_version(&rc->rc_header))
		return -1;

	gen = le32_to_cpu(rf->rf_generation);
	if (gen <= ls->ls_generation) {
		log_error(ls, "dlm_slots_copy_in gen %u old %u",
			  gen, ls->ls_generation);
	}
	ls->ls_generation = gen;

	num_slots = le16_to_cpu(rf->rf_num_slots);
	if (!num_slots)
		return -1;

	ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

	log_slots(ls, gen, num_slots, ro0, NULL, 0);

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
			if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)
				continue;
			memb->slot = le16_to_cpu(ro->ro_slot);
			memb->slot_prev = memb->slot;
			break;
		}

		if (memb->nodeid == our_nodeid) {
			if (ls->ls_slot && ls->ls_slot != memb->slot) {
				log_error(ls, "dlm_slots_copy_in our slot "
					  "changed %d %d", ls->ls_slot,
					  memb->slot);
				return -1;
			}

			if (!ls->ls_slot)
				ls->ls_slot = memb->slot;
		}

		if (!memb->slot) {
			log_error(ls, "dlm_slots_copy_in nodeid %d no slot",
				  memb->nodeid);
			return -1;
		}
	}

	return 0;
}

/* for any nodes that do not support slots, we will not have set memb->slot
   in wait_status_all(), so memb->slot will remain -1, and we will not
   assign slots or set ls_num_slots here */

int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
		     struct dlm_slot **slots_out, uint32_t *gen_out)
{
	struct dlm_member *memb;
	struct dlm_slot *array;
	int our_nodeid = dlm_our_nodeid();
	int array_size, max_slots, i;
	int need = 0;
	int max = 0;
	int num = 0;
	uint32_t gen = 0;

	/* our own memb struct will have slot -1 gen 0 */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->nodeid == our_nodeid) {
			memb->slot = ls->ls_slot;
			memb->generation = ls->ls_generation;
			break;
		}
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->generation > gen)
			gen = memb->generation;

		/* node doesn't support slots */

		if (memb->slot == -1)
			return -1;

		/* node needs a slot assigned */

		if (!memb->slot)
			need++;

		/* node has a slot assigned */

		num++;

		if (!max || max < memb->slot)
			max = memb->slot;

		/* sanity check, once slot is assigned it shouldn't change */

		if (memb->slot_prev && memb->slot && memb->slot_prev != memb->slot) {
			log_error(ls, "nodeid %d slot changed %d %d",
				  memb->nodeid, memb->slot_prev, memb->slot);
			return -1;
		}
		memb->slot_prev = memb->slot;
	}

	array_size = max + need;
	array = kcalloc(array_size, sizeof(*array), GFP_NOFS);
	if (!array)
		return -ENOMEM;

	num = 0;

	/* fill in slots (offsets) that are used */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!memb->slot)
			continue;

		if (memb->slot > array_size) {
			log_error(ls, "invalid slot number %d", memb->slot);
			kfree(array);
			return -1;
		}

		array[memb->slot - 1].nodeid = memb->nodeid;
		array[memb->slot - 1].slot = memb->slot;
		num++;
	}

	/* assign new slots from unused offsets */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->slot)
			continue;

		for (i = 0; i < array_size; i++) {
			if (array[i].nodeid)
				continue;

			memb->slot = i + 1;
			memb->slot_prev = memb->slot;
			array[i].nodeid = memb->nodeid;
			array[i].slot = memb->slot;
			num++;

			if (!ls->ls_slot && memb->nodeid == our_nodeid)
				ls->ls_slot = memb->slot;
			break;
		}

		if (!memb->slot) {
			log_error(ls, "no free slot found");
			kfree(array);
			return -1;
		}
	}

	gen++;

	log_slots(ls, gen, num, NULL, array, array_size);

	max_slots = (DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom) -
		     sizeof(struct rcom_config)) / sizeof(struct rcom_slot);

	if (num > max_slots) {
		log_error(ls, "num_slots %d exceeds max_slots %d",
			  num, max_slots);
		kfree(array);
		return -1;
	}

	*gen_out = gen;
	*slots_out = array;
	*slots_size = array_size;
	*num_slots = num;
	return 0;
}

static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
	struct dlm_member *memb = NULL;
	struct list_head *tmp;
	struct list_head *newlist = &new->list;
	struct list_head *head = &ls->ls_nodes;

	list_for_each(tmp, head) {
		memb = list_entry(tmp, struct dlm_member, list);
		if (new->nodeid < memb->nodeid)
			break;
	}

	if (!memb)
		list_add_tail(newlist, head);
	else {
		/* FIXME: can use list macro here */
		newlist->prev = tmp->prev;
		newlist->next = tmp;
		tmp->prev->next = newlist;
		tmp->prev = newlist;
	}
}

static int add_remote_member(int nodeid)
{
	int error;

	if (nodeid == dlm_our_nodeid())
		return 0;

	error = dlm_lowcomms_connect_node(nodeid);
	if (error < 0)
		return error;

	dlm_midcomms_add_member(nodeid);
	return 0;
}

static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
{
	struct dlm_member *memb;
	int error;

	memb = kzalloc(sizeof(*memb), GFP_NOFS);
	if (!memb)
		return -ENOMEM;

	memb->nodeid = node->nodeid;
	memb->weight = node->weight;
	memb->comm_seq = node->comm_seq;

	error = add_remote_member(node->nodeid);
	if (error < 0) {
		kfree(memb);
		return error;
	}

	add_ordered_member(ls, memb);
	ls->ls_num_nodes++;
	return 0;
}

static struct dlm_member *find_memb(struct list_head *head, int nodeid)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, head, list) {
		if (memb->nodeid == nodeid)
			return memb;
	}
	return NULL;
}

int dlm_is_member(struct dlm_ls *ls, int nodeid)
{
	if (find_memb(&ls->ls_nodes, nodeid))
		return 1;
	return 0;
}

int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
	if (find_memb(&ls->ls_nodes_gone, nodeid))
		return 1;
	return 0;
}

static void clear_memb_list(struct list_head *head,
			    void (*after_del)(int nodeid))
{
	struct dlm_member *memb;

	while (!list_empty(head)) {
		memb = list_entry(head->next, struct dlm_member, list);
		list_del(&memb->list);
		if (after_del)
			after_del(memb->nodeid);
		kfree(memb);
	}
}

static void remove_remote_member(int nodeid)
{
	if (nodeid == dlm_our_nodeid())
		return;

	dlm_midcomms_remove_member(nodeid);
}

static void clear_members_cb(int nodeid)
{
	remove_remote_member(nodeid);
}

void dlm_clear_members(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes, clear_members_cb);
	ls->ls_num_nodes = 0;
}

void dlm_clear_members_gone(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes_gone, NULL);
}

static void make_member_array(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int i, w, x = 0, total = 0, all_zero = 0, *array;

	kfree(ls->ls_node_array);
	ls->ls_node_array = NULL;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->weight)
			total += memb->weight;
	}

	/* all nodes revert to weight of 1 if all have weight 0 */

	if (!total) {
		total = ls->ls_num_nodes;
		all_zero = 1;
	}

	ls->ls_total_weight = total;
	array = kmalloc_array(total, sizeof(*array), GFP_NOFS);
	if (!array)
		return;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!all_zero && !memb->weight)
			continue;

		if (all_zero)
			w = 1;
		else
			w = memb->weight;

		DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););

		for (i = 0; i < w; i++)
			array[x++] = memb->nodeid;
	}

	ls->ls_node_array = array;
}

/* send a status request to all members just to establish comms connections */

static int ping_members(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int error = 0;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			break;
		}
		error = dlm_rcom_status(ls, memb->nodeid, 0);
		if (error)
			break;
	}
	if (error)
		log_rinfo(ls, "ping_members aborted %d last nodeid %d",
			  error, ls->ls_recover_nodeid);
	return error;
}

static void dlm_lsop_recover_prep(struct dlm_ls *ls)
{
	if (!ls->ls_ops || !ls->ls_ops->recover_prep)
		return;
	ls->ls_ops->recover_prep(ls->ls_ops_arg);
}

static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
{
	struct dlm_slot slot;
	uint32_t seq;
	int error;

	if (!ls->ls_ops || !ls->ls_ops->recover_slot)
		return;

	/* if there is no comms connection with this node
	   or the present comms connection is newer
	   than the one when this member was added, then
	   we consider the node to have failed (versus
	   being removed due to dlm_release_lockspace) */

	error = dlm_comm_seq(memb->nodeid, &seq);

	if (!error && seq == memb->comm_seq)
		return;

	slot.nodeid = memb->nodeid;
	slot.slot = memb->slot;

	ls->ls_ops->recover_slot(ls->ls_ops_arg, &slot);
}

void dlm_lsop_recover_done(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int i, num;

	if (!ls->ls_ops || !ls->ls_ops->recover_done)
		return;

	num = ls->ls_num_nodes;
	slots = kcalloc(num, sizeof(*slots), GFP_KERNEL);
	if (!slots)
		return;

	i = 0;
	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (i == num) {
			log_error(ls, "dlm_lsop_recover_done bad num %d", num);
			goto out;
		}
		slots[i].nodeid = memb->nodeid;
		slots[i].slot = memb->slot;
		i++;
	}

	ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num,
				 ls->ls_slot, ls->ls_generation);
 out:
	kfree(slots);
}

static struct dlm_config_node *find_config_node(struct dlm_recover *rv,
						int nodeid)
{
	int i;

	for (i = 0; i < rv->nodes_count; i++) {
		if (rv->nodes[i].nodeid == nodeid)
			return &rv->nodes[i];
	}
	return NULL;
}

int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
{
	struct dlm_member *memb, *safe;
	struct dlm_config_node *node;
	int i, error, neg = 0, low = -1;

	/* previously removed members that we've not finished removing need to
	 * count as a negative change so the "neg" recovery steps will happen
	 *
	 * This functionality must report all member changes to lsops or
	 * midcomms layer and must never return before doing so.
	 */

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		log_rinfo(ls, "prev removed member %d", memb->nodeid);
		neg++;
	}

	/* move departed members from ls_nodes to ls_nodes_gone */

	list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
		node = find_config_node(rv, memb->nodeid);
		if (node && !node->new)
			continue;

		if (!node) {
			log_rinfo(ls, "remove member %d", memb->nodeid);
		} else {
			/* removed and re-added */
			log_rinfo(ls, "remove member %d comm_seq %u %u",
				  memb->nodeid, memb->comm_seq, node->comm_seq);
		}

		neg++;
		list_move(&memb->list, &ls->ls_nodes_gone);
		remove_remote_member(memb->nodeid);
		ls->ls_num_nodes--;
		dlm_lsop_recover_slot(ls, memb);
	}

	/* add new members to ls_nodes */

	for (i = 0; i < rv->nodes_count; i++) {
		node = &rv->nodes[i];
		if (dlm_is_member(ls, node->nodeid))
			continue;
		error = dlm_add_member(ls, node);
		if (error)
			return error;

		log_rinfo(ls, "add member %d", node->nodeid);
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (low == -1 || memb->nodeid < low)
			low = memb->nodeid;
	}
	ls->ls_low_nodeid = low;

	make_member_array(ls);
	*neg_out = neg;

	error = ping_members(ls);
	log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
	return error;
}

/* Userspace guarantees that dlm_ls_stop() has completed on all nodes before
   dlm_ls_start() is called on any of them to start the new recovery. */

int dlm_ls_stop(struct dlm_ls *ls)
{
	int new;

	/*
	 * Prevent dlm_recv from being in the middle of something when we do
	 * the stop.  This includes ensuring dlm_recv isn't processing a
	 * recovery message (rcom), while dlm_recoverd is aborting and
	 * resetting things from an in-progress recovery.  i.e. we want
	 * dlm_recoverd to abort its recovery without worrying about dlm_recv
	 * processing an rcom at the same time.  Stopping dlm_recv also makes
	 * it easy for dlm_receive_message() to check locking stopped and add a
	 * message to the requestqueue without races.
	 */

	down_write(&ls->ls_recv_active);

	/*
	 * Abort any recovery that's in progress (see RECOVER_STOP,
	 * dlm_recovery_stopped()) and tell any other threads running in the
	 * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
	 */

	spin_lock(&ls->ls_recover_lock);
	set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
	ls->ls_recover_seq++;
	spin_unlock(&ls->ls_recover_lock);

	/*
	 * Let dlm_recv run again, now any normal messages will be saved on the
	 * requestqueue for later.
	 */

	up_write(&ls->ls_recv_active);

	/*
	 * This in_recovery lock does two things:
	 * 1) Keeps this function from returning until all threads are out
	 *    of locking routines and locking is truly stopped.
	 * 2) Keeps any new requests from being processed until it's unlocked
	 *    when recovery is complete.
	 */

	if (new) {
		set_bit(LSFL_RECOVER_DOWN, &ls->ls_flags);
		wake_up_process(ls->ls_recoverd_task);
		wait_event(ls->ls_recover_lock_wait,
			   test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
	}

	/*
	 * The recoverd suspend/resume makes sure that dlm_recoverd (if
	 * running) has noticed RECOVER_STOP above and quit processing the
	 * previous recovery.
	 */

	dlm_recoverd_suspend(ls);

	spin_lock(&ls->ls_recover_lock);
	kfree(ls->ls_slots);
	ls->ls_slots = NULL;
	ls->ls_num_slots = 0;
	ls->ls_slots_size = 0;
	ls->ls_recover_status = 0;
	spin_unlock(&ls->ls_recover_lock);

	dlm_recoverd_resume(ls);

	if (!ls->ls_recover_begin)
		ls->ls_recover_begin = jiffies;

	/* call recover_prep ops only once, not again for each additional
	 * dlm_ls_stop() while recovery is already stopped.
	 *
	 * If we were able to clear the LSFL_RUNNING bit (i.e. it was set),
	 * we know this is the first dlm_ls_stop() call.
	 */
	if (new)
		dlm_lsop_recover_prep(ls);

	return 0;
}

int dlm_ls_start(struct dlm_ls *ls)
{
	struct dlm_recover *rv, *rv_old;
	struct dlm_config_node *nodes = NULL;
	int error, count;

	rv = kzalloc(sizeof(*rv), GFP_NOFS);
	if (!rv)
		return -ENOMEM;

	error = dlm_config_nodes(ls->ls_name, &nodes, &count);
	if (error < 0)
		goto fail_rv;

	spin_lock(&ls->ls_recover_lock);

	/* the lockspace needs to be stopped before it can be started */

	if (!dlm_locking_stopped(ls)) {
		spin_unlock(&ls->ls_recover_lock);
		log_error(ls, "start ignored: lockspace running");
		error = -EINVAL;
		goto fail;
	}

	rv->nodes = nodes;
	rv->nodes_count = count;
	rv->seq = ++ls->ls_recover_seq;
	rv_old = ls->ls_recover_args;
	ls->ls_recover_args = rv;
	spin_unlock(&ls->ls_recover_lock);

	if (rv_old) {
		log_error(ls, "unused recovery %llx %d",
			  (unsigned long long)rv_old->seq, rv_old->nodes_count);
		kfree(rv_old->nodes);
		kfree(rv_old);
	}

	set_bit(LSFL_RECOVER_WORK, &ls->ls_flags);
	wake_up_process(ls->ls_recoverd_task);
	return 0;

 fail:
	kfree(nodes);
 fail_rv:
	kfree(rv);
	return error;
}