// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>
#include <linux/kexec.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;
bool thread_group_shares_l2;
bool thread_group_shares_l3;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};

#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
#define THREAD_GROUP_SHARE_L2_L3 2
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/* Maximum number of properties that groups of threads within a core can share */
#define MAX_THREAD_GROUP_PROPERTIES 2

struct thread_groups_list {
	unsigned int nr_properties;
	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
};

static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);

/*
 * On some big-core systems, thread_group_l2_cache_map for each CPU
 * corresponds to the set of its siblings within the core that share the
 * L2 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);

/*
 * On P10, thread_group_l3_cache_map for each CPU is equal to the
 * thread_group_l2_cache_map.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
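
/*
 * Worked example: booting with smt-enabled=2 makes smt_enabled_at_boot
 * equal 2, so during boot threads 0 and 1 of each core are bootable
 * while threads 2 and up fail the cpu_thread_in_core() test above and
 * are held back.
 */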

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}
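
/*
 * Illustrative sketch (hypothetical driver code, not from this file):
 * an interrupt controller exposing four or more hardware IPIs would
 * typically request one virq per message at probe time, e.g.
 *
 *	int msg;
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(my_ipi_virqs[msg], msg);
 *
 * where my_ipi_virqs[] is an assumed per-message virq array set up by
 * the driver's irq-domain mapping.
 */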

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
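
/*
 * Worked example: each message owns one byte of info->messages, so the
 * demux loop below can consume all pending messages with a single
 * xchg(). On a 64-bit little-endian system, message 0
 * (PPC_MSG_CALL_FUNCTION) is bit 0 of byte 0, i.e. IPI_MESSAGE(0) ==
 * 0x1, and message 2 is bit 16, i.e. IPI_MESSAGE(2) == 0x10000. On
 * big-endian the bytes are numbered from the other end, so the same
 * byte stores line up with the corresponding bits.
 */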

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as an ongoing
 * part of a running system. They can be used for crash, debug,
 * halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

noinstr static void nmi_ipi_lock(void)
{
	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
}

noinstr static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
	arch_atomic_set(&__nmi_ipi_lock, 0);
}

noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt handler
	 * may not be able to distinguish NMI IPIs from other types of NMIs,
	 * or because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

void crash_smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * In case of fadump, register data for all CPUs is captured by
	 * firmware on the ibm,os-term RTAS call. Skip IPI callbacks to other
	 * CPUs before this RTAS call to avoid tricky post-processing of those
	 * CPUs' backtraces.
	 */
	if (should_fadump_crash())
		return;

	if (stopped)
		return;

	stopped = true;

#ifdef CONFIG_KEXEC_CORE
	if (kexec_crash_image) {
		crash_kexec_prepare();
		return;
	}
#endif

	smp_send_stop();
}

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();

	/*
	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
	 * to know other CPUs are offline before it breaks locks to flush
	 * printk buffers, in case we panic()ed while holding the lock.
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

static struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_E500
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks. So
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
				struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, ORs in the whole srcmask in one shot. dstmask should be a
 * superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
				struct cpumask *(*dstmask)(int))
{
	struct cpumask *mask;
	int k;

	mask = srcmask(j);
	for_each_cpu(k, srcmask(i))
		cpumask_or(dstmask(k), dstmask(k), mask);

	if (i == j)
		return;

	mask = srcmask(i);
	for_each_cpu(k, srcmask(j))
		cpumask_or(dstmask(k), dstmask(k), mask);
}
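
/*
 * Worked example: suppose srcmask(i) = {0,1} and srcmask(j) = {4,5}.
 * After or_cpumasks_related(i, j, srcmask, dstmask), dstmask(0) and
 * dstmask(1) have gained {4,5}, while dstmask(4) and dstmask(5) have
 * gained {0,1}, i.e. every member of each source group now sees the
 * other group in its destination mask.
 */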

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups_list
 *                      structure @tglp.
 *
 * @dn: The device node of the CPU device.
 * @tglp: Pointer to a thread group list structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * This array can represent thread groupings for multiple properties.
 *
 * ibm,thread-groups[i + 0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * cache. If the value is 2, it implies that the threads in the same
 * group share the same L2 cache.
 *
 * ibm,thread-groups[i+1] tells us how many such thread groups exist for
 * the property ibm,thread-groups[i].
 *
 * ibm,thread-groups[i+2] tells us the number of threads in each such
 * group.
 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 *
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example:
 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
 * a) [1,2,4,8,10,12,14,9,11,13,15]
 * b) [2,2,4,8,10,12,14,9,11,13,15]
 *
 * wherein,
 *
 * a) provides information of Property "1" being shared by "2" groups,
 *    each with "4" threads. The "ibm,ppc-interrupt-server#s" of the
 *    first group is {8,10,12,14} and the "ibm,ppc-interrupt-server#s"
 *    of the second group is {9,11,13,15}. Property "1" is indicative of
 *    the threads in the group sharing the L1 cache, translation cache
 *    and instruction data flow.
 *
 * b) provides information of Property "2" being shared by "2" groups,
 *    each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the "ibm,ppc-interrupt-server#s"
 *    of the second group is {9,11,13,15}. Property "2" indicates that
 *    the threads in each group share the L2 cache.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups_list *tglp)
{
	unsigned int property_idx = 0;
	u32 *thread_group_array;
	size_t total_threads;
	int ret = 0, count;
	u32 *thread_list;
	int i = 0;

	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
	if (count < 0)
		return count;

	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!thread_group_array)
		return -ENOMEM;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, count);
	if (ret)
		goto out_free;

	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
		int j;
		struct thread_groups *tg = &tglp->property_tgs[property_idx++];

		tg->property = thread_group_array[i];
		tg->nr_groups = thread_group_array[i + 1];
		tg->threads_per_group = thread_group_array[i + 2];
		total_threads = tg->nr_groups * tg->threads_per_group;

		thread_list = &thread_group_array[i + 3];

		for (j = 0; j < total_threads; j++)
			tg->thread_list[j] = thread_list[j];
		i = i + 3 + total_threads;
	}

	tglp->nr_properties = property_idx;

out_free:
	kfree(thread_group_array);
	return ret;
}
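
/*
 * Continuing the example from the comment above parse_thread_groups():
 * after parsing, @tglp would hold nr_properties == 2 with
 *
 *	property_tgs[0] = { .property = 1, .nr_groups = 2,
 *			    .threads_per_group = 4,
 *			    .thread_list = {8,10,12,14,9,11,13,15} };
 *
 * and property_tgs[1] identical except for .property = 2. For either
 * entry, get_cpu_thread_group_start() below would return 4 for a CPU
 * whose hardware id is 11, since 11 first appears at thread_list[5],
 * inside the group starting at index 4.
 */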

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}

static struct thread_groups *__init get_thread_groups(int cpu,
						      int group_property,
						      int *err)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups_list *cpu_tgl = &tgl[cpu];
	struct thread_groups *tg = NULL;
	int i;
	*err = 0;

	if (!dn) {
		*err = -ENODATA;
		return NULL;
	}

	if (!cpu_tgl->nr_properties) {
		*err = parse_thread_groups(dn, cpu_tgl);
		if (*err)
			goto out;
	}

	for (i = 0; i < cpu_tgl->nr_properties; i++) {
		if (cpu_tgl->property_tgs[i].property == group_property) {
			tg = &cpu_tgl->property_tgs[i];
			break;
		}
	}

	if (!tg)
		*err = -EINVAL;
out:
	of_node_put(dn);
	return tg;
}

static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
					       int cpu, int cpu_group_start)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int i;

	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			return -ENODATA;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, *mask);
	}

	return 0;
}

static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
	int cpu_group_start = -1, err = 0;
	struct thread_groups *tg = NULL;
	cpumask_var_t *mask = NULL;

	if (cache_property != THREAD_GROUP_SHARE_L1 &&
	    cache_property != THREAD_GROUP_SHARE_L2_L3)
		return -EINVAL;

	tg = get_thread_groups(cpu, cache_property, &err);

	if (!tg)
		return err;

	cpu_group_start = get_cpu_thread_group_start(cpu, tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		return -ENODATA;
	}

	if (cache_property == THREAD_GROUP_SHARE_L1) {
		mask = &per_cpu(thread_group_l1_cache_map, cpu);
		err = update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
	} else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
		mask = &per_cpu(thread_group_l2_cache_map, cpu);
		err = update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
		if (err)
			return err;
		mask = &per_cpu(thread_group_l3_cache_map, cpu);
		err = update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
	}

	return err;
}

static bool shared_caches;

#ifdef CONFIG_SCHED_SMT
/* Flags for the SMT sched-domain level; adds priority packing on CPUs
 * with an asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_mc_mask, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
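
/*
 * Summary (illustrative, assuming all the config options above are
 * enabled): the resulting scheduler domain hierarchy, from smallest to
 * largest span, is
 *
 *	SMT   - threads of a core (or of a small core, see fixup_topology())
 *	CACHE - CPUs sharing an L2 cache
 *	MC    - CPUs in the same coregroup
 *	DIE   - all CPUs in the node
 *
 * fixup_topology() later collapses any level whose mask duplicates the
 * previous one before the table is handed to set_sched_topology().
 */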

static int __init init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);

		if (err)
			return err;
	}

	thread_group_shares_l2 = true;
	thread_group_shares_l3 = true;
	pr_debug("L2/L3 cache only shared by the threads in the small core\n");

	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NUMA
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
#endif
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (cpu_to_chip_id(boot_cpuid) != -1) {
		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);

		/*
		 * All threads of a core belong to the same core;
		 * chip_id_lookup_table will have one entry per core.
		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
		 * other CPU will have one either.
		 */
		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
		if (chip_id_lookup_table)
			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
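
/*
 * Illustrative sketch (hypothetical platform code): a platform with no
 * special offline requirements could wire the generic helpers above
 * directly into its smp_ops:
 *
 *	static struct smp_ops_t my_smp_ops = {
 *		...
 *		.cpu_disable	= generic_cpu_disable,
 *		.cpu_die	= generic_cpu_die,
 *	};
 */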

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	task_thread_info(idle)->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
	const bool booting = system_state < SYSTEM_RUNNING;
	const unsigned long hp_spin_ms = 1;
	unsigned long deadline;
	int rc;
	const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * At boot time, simply spin on the callin word until the
	 * deadline passes.
	 *
	 * At run time, spin for an optimistic amount of time to avoid
	 * sleeping in the common case.
	 */
	deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
	spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));

	if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
		const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
		const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;

		deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
		while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
			fsleep(sleep_interval_us);
	}

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/*
 * Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	id = of_get_cpu_hwid(np, 0);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
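
/*
 * Worked example: with 8 threads per core (threads_shift == 3),
 * cpu_core_index_of_thread(13) == 1 and cpu_first_thread_of_core(1) == 8,
 * i.e. logical CPU 13 is thread 5 of core 1, whose first thread is CPU 8.
 */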

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	struct device_node *l2_cache, *np;
	int i;

	if (has_big_cores)
		submask_fn = cpu_smallcore_mask;

	/*
	 * If the threads in a thread-group share L2 cache, then the
	 * L2-mask can be obtained from thread_group_l2_cache_map.
	 */
	if (thread_group_shares_l2) {
		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));

		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
			if (cpu_online(i))
				set_cpus_related(i, cpu, cpu_l2_cache_mask);
		}

		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
				     cpu);
		}

		return true;
	}

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache || !*mask) {
		/* Assume only core siblings share cache with this CPU */
		for_each_cpu(i, cpu_sibling_mask(cpu))
			set_cpus_related(cpu, i, cpu_l2_cache_mask);

		return false;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update l2-cache mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);

	/* Skip all CPUs already part of current CPU l2-cache mask */
	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));

	for_each_cpu(i, *mask) {
		/*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
		 */
		np = cpu_to_l2cache(i);

		/* Skip all CPUs already part of current CPU l2-cache */
		if (np == l2_cache) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
		}

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}
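
/*
 * Illustrative example (hypothetical topology): if core A (CPUs 0-3)
 * and core B (CPUs 4-7) hang off the same l2-cache device-tree node,
 * then once all eight CPUs are online update_mask_by_l2() has given
 * each of them a cpu_l2_cache_mask of {0-7}, which is what the CACHE
 * scheduler domain level above is built from.
 */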

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
	int i;

	unmap_cpu_from_node(cpu);

	if (shared_caches)
		mask_fn = cpu_l2_cache_mask;

	for_each_cpu(i, mask_fn(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}

	for_each_cpu(i, cpu_core_mask(cpu))
		set_cpus_unrelated(cpu, i, cpu_core_mask);

	if (has_coregroup_support()) {
		for_each_cpu(i, cpu_coregroup_mask(cpu))
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	int i;

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int coregroup_id = cpu_to_coregroup_id(cpu);
	int i;

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	if (!*mask) {
		/* Assume only siblings are part of this CPU's coregroup */
		for_each_cpu(i, submask_fn(cpu))
			set_cpus_related(cpu, i, cpu_coregroup_mask);

		return;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update coregroup mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

	/* Skip all CPUs already part of coregroup mask */
	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));

	for_each_cpu(i, *mask) {
		/* Skip all CPUs not part of this coregroup */
		if (coregroup_id == cpu_to_coregroup_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
		}
	}
}
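
/*
 * Summary of mask construction order in add_cpu_to_masks() below: the
 * incoming CPU is first added to its own sibling (thread) mask, then to
 * the smallcore mask (on big-core systems), then the L2 mask via
 * update_mask_by_l2(), then the coregroup mask (if supported), and
 * finally the core mask, which is bounded by chip-id when one is known
 * or by the DIE mask otherwise.
 */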

static void add_cpu_to_masks(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int first_thread = cpu_first_thread_sibling(cpu);
	cpumask_var_t mask;
	int chip_id = -1;
	bool ret;
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	map_cpu_to_node(cpu, cpu_to_node(cpu));
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	cpumask_set_cpu(cpu, cpu_core_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);

	/* In CPU-hotplug path, hence use GFP_ATOMIC */
	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
	update_mask_by_l2(cpu, &mask);

	if (has_coregroup_support())
		update_coregroup_mask(cpu, &mask);

	if (chip_id_lookup_table && ret)
		chip_id = cpu_to_chip_id(cpu);

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	/* Update core_mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

	/* Skip all CPUs already part of current CPU core mask */
	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
	if (chip_id == -1)
		cpumask_and(mask, mask, cpu_cpu_mask(cpu));

	for_each_cpu(i, mask) {
		if (chip_id == cpu_to_chip_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_core_mask(i));
		}
	}

	free_cpumask_var(mask);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = raw_smp_processor_id();

	/* PPC64 calls setup_kup() in early_setup_secondary() */
	if (IS_ENABLED(CONFIG_PPC32))
		setup_kup();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	rcu_cpu_starting(cpu);
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

static void __init fixup_topology(void)
{
	int i;

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;

	/*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if masks are different.
	 * - Don't consolidate if sd_flags exist and are different.
	 */
	for (i = 1; i <= die_idx; i++) {
		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
			continue;

		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
		    powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
			continue;

		if (!powerpc_topology[i - 1].sd_flags)
			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;

		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
#ifdef CONFIG_SCHED_DEBUG
		powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	fixup_topology();
	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void arch_cpu_idle_dead(void)
{
	/*
	 * Disable on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif