// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops->cpu_boot)
		return ops->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);
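
/*
 * Bring-up handshake with secondary_start_kernel(): __cpu_up() publishes the
 * idle task and initial stack via secondary_data, kicks the new CPU through
 * its cpu_operations, then waits on cpu_running for up to five seconds. The
 * secondary reports progress with update_cpu_boot_status(), so a CPU that
 * never makes it online can be diagnosed from the status code below.
 */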
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.task = idle;
	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
#if defined(CONFIG_ARM64_PTR_AUTH)
	secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
	secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
#endif
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/* Now bring the CPU into our world */
	ret = boot_secondary(cpu, idle);
	if (ret) {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	/*
	 * CPU was successfully started, wait for it to come online or
	 * time out.
	 */
	wait_for_completion_timeout(&cpu_running,
				    msecs_to_jiffies(5000));
	if (cpu_online(cpu))
		return 0;

	pr_crit("CPU%u: failed to come online\n", cpu);
	secondary_data.task = NULL;
	secondary_data.stack = NULL;
#if defined(CONFIG_ARM64_PTR_AUTH)
	secondary_data.ptrauth_key.apia.lo = 0;
	secondary_data.ptrauth_key.apia.hi = 0;
#endif
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
	status = READ_ONCE(secondary_data.status);
	if (status == CPU_MMU_OFF)
		status = READ_ONCE(__early_cpu_boot_status);

	switch (status & CPU_BOOT_STATUS_MASK) {
	default:
		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
		       cpu, status);
		cpus_stuck_in_kernel++;
		break;
	case CPU_KILL_ME:
		if (!op_cpu_kill(cpu)) {
			pr_crit("CPU%u: died during early boot\n", cpu);
			break;
		}
		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		/* Fall through */
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in kernel\n", cpu);
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
		if (status & CPU_STUCK_REASON_NO_GRAN) {
			pr_crit("CPU%u: does not support %luK granule\n",
				cpu, PAGE_SIZE / SZ_1K);
		}
		cpus_stuck_in_kernel++;
		break;
	case CPU_PANIC_KERNEL:
		panic("CPU%u detected unsupported configuration\n", cpu);
	}

	return ret;
}
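
/*
 * When interrupt priority masking (pseudo-NMI) is in use, each CPU must have
 * the GIC system register interface enabled and its PMR initialised before
 * it starts taking interrupts. Callers invoke this with IRQs still masked at
 * the PSTATE level, which the WARN_ON() on PSR_I_BIT checks.
 */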
static void init_gic_priority_masking(void)
{
	u32 cpuflags;

	if (WARN_ON(!gic_enable_sre()))
		return;

	cpuflags = read_sysreg(daif);

	WARN_ON(!(cpuflags & PSR_I_BIT));

	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	const struct cpu_operations *ops;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	ops = get_cpu_ops(cpu);
	if (ops->cpu_postboot)
		ops->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr,
		read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have cpu_ops, so test for it.
	 */
	if (!ops || !ops->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (ops->cpu_disable)
		return ops->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!ops->cpu_kill)
		return 0;

	return ops->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down; waits
 * until the shutdown has completed or times out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}
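
/*
 * Note on the teardown ordering above: op_cpu_disable()/__cpu_disable() run
 * on the CPU being removed and may still fail the unplug; once
 * __cpu_disable() succeeds, the dying CPU ends up in cpu_die() below, while
 * the requesting CPU waits in __cpu_die() and finally uses op_cpu_kill() to
 * ask the enable method (e.g. firmware) to confirm the CPU has really left
 * the kernel.
 */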

/*
 * Called from the idle thread for the CPU which has been shutdown.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	ops->cpu_die(cpu);

	BUG();
}
#endif

static void __cpu_try_die(int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_die)
		ops->cpu_die(cpu);
#endif
}

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		update_cpu_boot_status(CPU_KILL_ME);
		__cpu_try_die(cpu);
	}

	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");

	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
		kvm_compute_layout();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();

	/*
	 * We now know enough about the boot CPU to apply the
	 * alternatives that cannot wait until interrupt handling
	 * and/or scheduling is enabled.
	 */
	apply_boot_alternatives();

	/* Conditionally switch to GIC PMR for interrupt masking */
	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%pOF: missing reg property\n", dn);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%pOF: invalid reg property\n", dn);
		return INVALID_HWID;
	}
	return hwid;
}
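
/*
 * Illustrative only (not taken from any particular platform): with two
 * address cells, a DT cpu node such as
 *
 *	cpu@100 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a57";
 *		reg = <0x0 0x100>;
 *		enable-method = "psci";
 *	};
 *
 * makes of_get_cpu_mpidr() above return a hwid of 0x100, since only the
 * MPIDR affinity bits may be set in "reg".
 */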

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	const struct cpu_operations *ops;

	if (init_cpu_ops(cpu))
		return -ENODEV;

	ops = get_cpu_ops(cpu);
	if (ops->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
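
/*
 * Walk every GICC entry in the MADT to populate cpu_logical_map and the
 * saved MADT data, then map the resulting logical CPUs to their NUMA nodes.
 */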
static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * do a walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
				dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
					dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}
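
/*
 * Runs on the boot CPU after smp_init_cpus() has built cpu_logical_map:
 * record topology/NUMA information for the boot CPU, then mark as present
 * every possible CPU whose enable method is ready to boot it (i.e. whose
 * ops->cpu_prepare() succeeds).
 */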
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	const struct cpu_operations *ops;
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		ops = get_cpu_ops(cpu);
		if (!ops)
			continue;

		err = ops->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static void local_cpu_stop(void)
{
	set_cpu_online(smp_processor_id(), false);

	local_daif_mask();
	sdei_mask_local_cpu();
	cpu_park_loop();
}

/*
 * We need to implement panic_smp_self_stop() for parallel panic() calls, so
 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
 * CPUs that have already stopped themselves.
 */
void panic_smp_self_stop(void)
{
	local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		__cpu_try_die(cpu);

	/* just in case */
	cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		local_cpu_stop();
		irq_exit();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			irq_enter();
			ipi_cpu_crash_stop(cpu, regs);

			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_other_online_cpus() && timeout--)
		udelay(1);

	if (num_other_online_cpus())
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(any_cpu);

	if (ops && ops->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}