/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
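	/*
	 * Illustrative example (a reading aid, assuming the ppc64
	 * "smt-enabled=" boot option that sets smt_enabled_at_boot):
	 * smt-enabled=2 limits each core to threads 0 and 1, so the second
	 * test below rejects threads 2 and up; smt-enabled=off leaves
	 * smt_enabled_at_boot at 0 and the first test rejects every
	 * secondary thread.
	 */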
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}
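
/*
 * Sketch of the muxed-IPI encoding implemented below (derived from the
 * IPI_MESSAGE() macro; included purely as a reading aid): each message type
 * owns one byte of the per-cpu 'messages' word, so
 *
 *	smp_muxed_ipi_set_message(cpu, PPC_MSG_RESCHEDULE);
 *
 * sets that CPU's PPC_MSG_RESCHEDULE byte to 1, and smp_ipi_demux_relaxed()
 * later xchg()s the whole word back to zero and handles every byte found set.
 */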
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */
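
/*
 * Illustrative use of this interface (hypothetical handler name, shown only
 * as a reading aid); the debugger and kexec paths below follow this pattern:
 *
 *	static void my_nmi_callback(struct pt_regs *regs) { ... }
 *	...
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_callback, 1000000);
 *
 * runs the callback on every other online CPU and waits up to delay_us
 * (here one second) for all targets to enter the handler.
 */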
static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

void smp_flush_nmi_ipi(u64 delay_us)
{
	unsigned long flags;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				return;
		}
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_unlock_end(&flags);
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(nmi_ipi_busy_count == 0);
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu, safe);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * This is a special case because it never returns, so the NMI IPI
	 * handling would never mark it as done, which makes any later
	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
	 *
	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
	 */
	nmi_ipi_lock();
	nmi_ipi_busy_count--;
	nmi_ipi_unlock();

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
				struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif
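
/*
 * Example (illustrative only): set_cpus_related(4, 5, cpu_sibling_mask)
 * marks CPU 5 in CPU 4's sibling mask and CPU 4 in CPU 5's, keeping the
 * per-cpu masks symmetric; set_cpus_unrelated() undoes the same pair on
 * CPU hot-unplug.
 */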
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin map entry is 0 (can be a leftover from
	 * a CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}
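
/*
 * Illustrative device tree fragment (hypothetical values, shown only to make
 * the helpers below concrete): a cpu node such as
 *
 *	PowerPC,POWER9@20 { reg = <0x20>; ... };
 *
 * makes cpu_to_core_id() return 0x20 for a logical CPU backed by that node,
 * while cpu_core_index_of_thread()/cpu_first_thread_of_core() convert between
 * a logical CPU number and its core by shifting with threads_shift (e.g.
 * threads_shift == 3 on an SMT8 core).
 */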
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache)
		return false;

	for_each_cpu(i, cpu_online_mask) {
		/*
		 * when updating the masks the current CPU has not been marked
		 * online, but we need to update the cache masks
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
	}
}
#endif

static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int chipid = cpu_to_chip_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	/*
	 * Copy the thread sibling mask into the cache sibling mask
	 * and mark any CPUs that share an L2 with this CPU.
	 */
	for_each_cpu(i, cpu_sibling_mask(cpu))
		set_cpus_related(cpu, i, cpu_l2_cache_mask);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	/*
	 * Copy the cache sibling mask into core sibling mask and mark
	 * any CPUs on the same chip as this CPU.
	 */
	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		set_cpus_related(cpu, i, cpu_core_mask);

	if (chipid == -1)
		return;

	for_each_cpu(i, cpu_online_mask)
		if (cpu_to_chip_id(i) == chipid)
			set_cpus_related(cpu, i, cpu_core_mask);
}
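
/*
 * Resulting mask hierarchy (a summary of add_cpu_to_masks() above, included
 * as a reading aid): cpu_sibling_mask(cpu) (threads of one core) is a subset
 * of cpu_l2_cache_mask(cpu) (threads sharing an L2), which in turn is a
 * subset of cpu_core_mask(cpu) (all threads on the same chip).
 */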
static bool shared_caches;

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
		shared_caches = true;

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return cpu_l2_cache_mask(cpu);
}

static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	/*
	 * If any CPU detects that it's sharing a cache with another CPU then
	 * use the deeper topology that is aware of this sharing.
	 */
1164 */ 1165 if (shared_caches) { 1166 pr_info("Using shared cache scheduler topology\n"); 1167 set_sched_topology(power9_topology); 1168 } else { 1169 pr_info("Using standard scheduler topology\n"); 1170 set_sched_topology(powerpc_topology); 1171 } 1172 } 1173 1174 #ifdef CONFIG_HOTPLUG_CPU 1175 int __cpu_disable(void) 1176 { 1177 int cpu = smp_processor_id(); 1178 int err; 1179 1180 if (!smp_ops->cpu_disable) 1181 return -ENOSYS; 1182 1183 this_cpu_disable_ftrace(); 1184 1185 err = smp_ops->cpu_disable(); 1186 if (err) 1187 return err; 1188 1189 /* Update sibling maps */ 1190 remove_cpu_from_masks(cpu); 1191 1192 return 0; 1193 } 1194 1195 void __cpu_die(unsigned int cpu) 1196 { 1197 if (smp_ops->cpu_die) 1198 smp_ops->cpu_die(cpu); 1199 } 1200 1201 void cpu_die(void) 1202 { 1203 /* 1204 * Disable on the down path. This will be re-enabled by 1205 * start_secondary() via start_secondary_resume() below 1206 */ 1207 this_cpu_disable_ftrace(); 1208 1209 if (ppc_md.cpu_die) 1210 ppc_md.cpu_die(); 1211 1212 /* If we return, we re-enter start_secondary */ 1213 start_secondary_resume(); 1214 } 1215 1216 #endif 1217