/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
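
/*
 * Worked example (illustrative, assuming SMT8 cores): booting with
 * "smt-enabled=2" leaves smt_enabled_at_boot == 2, so threads 0 and 1
 * of each core pass the checks above while threads 2-7 are held back;
 * "smt-enabled=off" gives smt_enabled_at_boot == 0, and only thread 0
 * of each core is brought up.
 */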

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}
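
/*
 * Layout note (a sketch of how the muxing works): info->messages is
 * treated as an array of bytes, one byte lane per message type, so
 * smp_muxed_ipi_set_message() below is a single byte store and
 * smp_ipi_demux() can collect every pending lane with one xchg().
 * On a little-endian 64-bit kernel, for instance, PPC_MSG_RESCHEDULE
 * (message 1) occupies bits 8-15 of the word; see IPI_MESSAGE() below.
 */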

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	smp_muxed_ipi_set_message(cpu, msg);
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned long all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];
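
/*
 * Cache per-CPU hardware details at bringup: the PVR is read once via
 * mfspr() so later readers don't need an SPR access, and Freescale
 * Book3E parts also record the highest TLB1 entry index
 * (next_tlbcam_idx) as the starting point for TLBCAM allocation.
 */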
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
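
/*
 * Sketch of the resulting cpu_state handshake, for reference:
 *
 *	kick_cpu()        sets CPU_UP_PREPARE via generic_set_cpu_up()
 *	dying secondary   sets CPU_DEAD via generic_set_cpu_dead()
 *	generic_cpu_die() polls is_cpu_dead(), giving up after ~10s
 */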

void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
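
/*
 * Note on the callin handshake in __cpu_up(): the secondary reports in
 * by setting cpu_callin_map[cpu] from start_secondary(), so both
 * polling loops above bound the wait at roughly five seconds
 * (50000 * udelay(100) at boot, 5000 * msleep(1) for hotplug).
 */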

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
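
/*
 * Worked example for the thread/core helpers above, assuming SMT8
 * parts (threads_shift == 3): logical CPU 21 is thread 5 of core 2,
 * so cpu_core_index_of_thread(21) == 2 and cpu_first_thread_of_core(2)
 * == 16; the sibling loop in start_secondary() below then walks
 * CPUs 16-23.
 */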

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves down to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}
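
/*
 * The hotplug teardown below mirrors the bringup in start_secondary():
 * __cpu_disable() removes the CPU from the sibling and core masks
 * (finishing with traverse_core_siblings(cpu, false)), __cpu_die()
 * hands off to smp_ops->cpu_die() so the platform can wait for and
 * clean up the dead CPU, and cpu_die() is what the dying CPU itself
 * runs.
 */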

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif