/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static int cede_offline_enabled __read_mostly = 1;

/*
 * Enable/disable cede_offline when available.
 */
static int __init setup_cede_offline(char *str)
{
	if (!strcmp(str, "off"))
		cede_offline_enabled = 0;
	else if (!strcmp(str, "on"))
		cede_offline_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("cede_offline=", setup_cede_offline);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
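		/*
		 * An INACTIVE cpu cedes itself to the hypervisor in the
		 * loop below and is woken by an interrupt or by H_PROD
		 * (see dlpar_offline_cpu()), re-checking the preferred
		 * state on each pass. The non-zero latency hint tells
		 * the hypervisor that a longer-latency (deeper) sleep
		 * state is acceptable while the cpu is inactive.
		 */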
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destruct so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu. If they were not they would be
	 * done here. Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
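/*
 * For example (hypothetical layout): with nthreads == 2 and logical
 * cpus 0-3 already present out of 8 possible, the window search in
 * pseries_add_processor() slides {0,1} -> {2,3} -> {4,5} and assigns
 * the new threads to {4,5}, preserving the x^1 == y pairing above.
 */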
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
		       " supports %d logical cpus.\n", np->full_name,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
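/*
 * Like pseries_add_processor(), this is driven from the OF_RECONFIG
 * notifier (pseries_smp_nb, registered below) when a cpu node is
 * detached from the device tree.
 */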
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	index = 0;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}
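	/*
	 * With the DRC acquired, the remaining steps are: pull in the
	 * new cpu node via configure-connector, attach it to the device
	 * tree, and online its threads, releasing the DRC again if any
	 * step fails.
	 */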
	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
		 drc_index);
	return rc;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
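			/*
			 * H_PROD wakes the ceded thread out of
			 * extended_cede_processor(); it then observes
			 * the new preferred state, leaves the cede loop
			 * in pseries_mach_cpu_die() and calls
			 * rtas_stop_self(), which pseries_cpu_die()
			 * (via __cpu_die() below) polls for.
			 */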
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
					!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n",
			       thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %s, drc index: %x\n",
		 dn->name, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
			drc_index, dn->name, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %s, rc: %d\n", dn->name, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
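	/*
	 * For example (hypothetical counts): with 4 cpu nodes and
	 * cpus_to_remove == 4, the walk below returns cpus_found == 4;
	 * the "Cannot remove all CPUs" warning fires and the caller
	 * rejects the request, since it requires cpus_found to be
	 * strictly greater than cpus_to_remove.
	 */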
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %s\n",
				dn->name);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
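	/*
	 * For example (hypothetical values):
	 *   ibm,drc-indexes = <3 0x10000000 0x10000001 0x10000002>;
	 * describes three connectors, with the count in cell 0 and the
	 * drc values themselves starting at cell 1.
	 */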
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}

static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

/*
 * Writes to /sys/devices/system/cpu/probe and .../release are routed
 * here through ppc_md.cpu_probe and ppc_md.cpu_release; probe takes a
 * drc index, release takes a device tree path.
 */

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}
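/*
 * Example usage (hypothetical drc index and node path):
 *   echo 0x10000001 > /sys/devices/system/cpu/probe
 *   echo /cpus/PowerPC,POWER8@8 > /sys/devices/system/cpu/release
 */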
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

static int __init pseries_cpu_hotplug_init(void)
{
	struct device_node *np;
	const char *typep;
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	for_each_node_by_name(np, "interrupt-controller") {
		typep = of_get_property(np, "compatible", NULL);
		if (typep && strstr(typep, "open-pic")) {
			of_node_put(np);

			printk(KERN_INFO "CPU Hotplug not supported on "
			       "systems using MPIC\n");
			return 0;
		}
	}

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
		       "- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);