/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static bool cede_offline_enabled __read_mostly = true;

/*
 * Enable/disable cede_offline when available.
 */
static int __init setup_cede_offline(char *str)
{
	return (kstrtobool(str, &cede_offline_enabled) == 0);
}

__setup("cede_offline=", setup_cede_offline);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu. If they were not they would be
	 * done here. Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	index = 0;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn, parent);

	/* Regardless we are done with parent now */
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
		 drc_index);
	return rc;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;

			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %s, drc index: %x\n",
		 dn->name, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
			drc_index, dn->name, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %s\n",
				dn->name);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}
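
/*
 * Illustrative example with made-up values: the ibm,drc-indexes property
 * scanned by find_dlpar_cpus_to_add() below is laid out as a leading
 * element count followed by the drc index values themselves, so a
 * hypothetical
 *
 *	ibm,drc-indexes = <4 0x10000000 0x10000008 0x10000010 0x10000018>;
 *
 * describes four candidate CPU drc indexes.  That leading count is why
 * the scan below starts at index = 1 rather than 0.
 */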

static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		kfree(cpu_drcs);
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}

static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}
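
/*
 * Illustrative usage sketch, assuming the probe/release attributes the
 * generic cpu subsystem exposes when CONFIG_ARCH_CPU_PROBE_RELEASE is
 * set: the hooks wired up in pseries_cpu_hotplug_init() below sit behind
 * /sys/devices/system/cpu/probe and /sys/devices/system/cpu/release.
 * Writing a drc index (the value here is made up) hot-adds a CPU:
 *
 *	echo 0x10000000 > /sys/devices/system/cpu/probe
 *
 * while the release file expects the device tree path of the cpu node
 * to remove.
 */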

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

static int __init pseries_cpu_hotplug_init(void)
{
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
			qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
				"- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);