Lines matching full:cpu (arm64 SMP bringup, arch/arm64/kernel/smp.c)
23 #include <linux/cpu.h>
40 #include <asm/cpu.h>
83 static void ipi_setup(int cpu);
86 static void ipi_teardown(int cpu);
87 static int op_cpu_kill(unsigned int cpu);
89 static inline int op_cpu_kill(unsigned int cpu) in op_cpu_kill() argument
97 * Boot a secondary CPU, and assign it the specified idle task.
98 * This also gives us the initial stack to use for this CPU.
100 static int boot_secondary(unsigned int cpu, struct task_struct *idle) in boot_secondary() argument
102 const struct cpu_operations *ops = get_cpu_ops(cpu); in boot_secondary()
105 return ops->cpu_boot(cpu); in boot_secondary()
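Editor's note: boot_secondary() above is a thin dispatch through a per-CPU method table (get_cpu_ops() returning a struct cpu_operations). The following is a minimal userspace sketch of that ops-table pattern only; struct cpu_ops, get_ops() and psci_like_boot() are invented stand-ins, not the kernel API.

/* sketch_ops_dispatch.c — illustrative only, not the kernel code */
#include <errno.h>
#include <stdio.h>

#define MAX_CPUS 4

/* Hypothetical stand-in for a per-CPU method table. */
struct cpu_ops {
	const char *name;
	int (*cpu_boot)(unsigned int cpu);	/* may be NULL if unsupported */
};

static int psci_like_boot(unsigned int cpu)
{
	printf("booting cpu%u via the %s backend\n", cpu, "demo");
	return 0;
}

static const struct cpu_ops demo_ops = {
	.name		= "demo",
	.cpu_boot	= psci_like_boot,
};

/* In reality there is one ops entry per CPU; one shared entry suffices here. */
static const struct cpu_ops *get_ops(unsigned int cpu)
{
	return cpu < MAX_CPUS ? &demo_ops : NULL;
}

/* Mirrors the shape of boot_secondary(): look up the table, call the hook. */
static int boot_one(unsigned int cpu)
{
	const struct cpu_ops *ops = get_ops(cpu);

	if (!ops || !ops->cpu_boot)
		return -EOPNOTSUPP;
	return ops->cpu_boot(cpu);
}

int main(void)
{
	for (unsigned int cpu = 1; cpu < MAX_CPUS; cpu++)
		if (boot_one(cpu))
			fprintf(stderr, "cpu%u: boot hook failed\n", cpu);
	return 0;
}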
112 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
124 /* Now bring the CPU into our world */ in __cpu_up()
125 ret = boot_secondary(cpu, idle); in __cpu_up()
127 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); in __cpu_up()
132 * CPU was successfully started, wait for it to come online or in __cpu_up()
137 if (cpu_online(cpu)) in __cpu_up()
140 pr_crit("CPU%u: failed to come online\n", cpu); in __cpu_up()
148 pr_err("CPU%u: failed in unknown state : 0x%lx\n", in __cpu_up()
149 cpu, status); in __cpu_up()
153 if (!op_cpu_kill(cpu)) { in __cpu_up()
154 pr_crit("CPU%u: died during early boot\n", cpu); in __cpu_up()
157 pr_crit("CPU%u: may not have shut down cleanly\n", cpu); in __cpu_up()
160 pr_crit("CPU%u: is stuck in kernel\n", cpu); in __cpu_up()
162 pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); in __cpu_up()
164 pr_crit("CPU%u: does not support %luK granule\n", in __cpu_up()
165 cpu, PAGE_SIZE / SZ_1K); in __cpu_up()
170 panic("CPU%u detected unsupported configuration\n", cpu); in __cpu_up()
192 * This is the secondary CPU boot entry. We're using this CPU's in secondary_start_kernel()
200 unsigned int cpu = smp_processor_id(); in secondary_start_kernel() local
218 rcu_cpu_starting(cpu); in secondary_start_kernel()
223 * this CPU ticks all of those. If it doesn't, the CPU will in secondary_start_kernel()
228 ops = get_cpu_ops(cpu); in secondary_start_kernel()
233 * Log the CPU info before it is marked online and might get read. in secondary_start_kernel()
236 store_cpu_topology(cpu); in secondary_start_kernel()
241 notify_cpu_starting(cpu); in secondary_start_kernel()
243 ipi_setup(cpu); in secondary_start_kernel()
245 numa_add_cpu(cpu); in secondary_start_kernel()
248 * OK, now it's safe to let the boot CPU continue. Wait for in secondary_start_kernel()
249 * the CPU migration code to notice that the CPU is online in secondary_start_kernel()
252 pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n", in secondary_start_kernel()
253 cpu, (unsigned long)mpidr, in secondary_start_kernel()
256 set_cpu_online(cpu, true); in secondary_start_kernel()
268 static int op_cpu_disable(unsigned int cpu) in op_cpu_disable() argument
270 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_disable()
284 return ops->cpu_disable(cpu); in op_cpu_disable()
294 unsigned int cpu = smp_processor_id(); in __cpu_disable() local
297 ret = op_cpu_disable(cpu); in __cpu_disable()
301 remove_cpu_topology(cpu); in __cpu_disable()
302 numa_remove_cpu(cpu); in __cpu_disable()
305 * Take this CPU offline. Once we clear this, we can't return, in __cpu_disable()
306 * and we must not schedule until we're ready to give up the cpu. in __cpu_disable()
308 set_cpu_online(cpu, false); in __cpu_disable()
309 ipi_teardown(cpu); in __cpu_disable()
312 * OK - migrate IRQs away from this CPU in __cpu_disable()
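Editor's note: the __cpu_disable() hits above show a strict ordering: the CPU is removed from the online mask first, and only then are per-CPU resources (IPIs, IRQs) torn down, so no new work can be routed to it mid-teardown. A minimal sketch of that ordering, with invented names and an atomic flag standing in for the online mask:

/* sketch_offline_order.c — clear "online" first, tear down second */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CPUS 4

static atomic_bool online[MAX_CPUS];

/* Stand-in for "route new work / IRQs to an online CPU only". */
static int pick_target(void)
{
	for (int cpu = 0; cpu < MAX_CPUS; cpu++)
		if (atomic_load(&online[cpu]))
			return cpu;
	return -1;
}

static void cpu_disable(int cpu)
{
	/* 1. Drop out of the online mask: nothing new lands here any more. */
	atomic_store(&online[cpu], false);
	/* 2. Only then tear down the per-CPU resources. */
	printf("cpu%d: teardown after going offline\n", cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < MAX_CPUS; cpu++)
		atomic_store(&online[cpu], true);

	cpu_disable(3);
	printf("new work now targets cpu%d\n", pick_target());
	return 0;
}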
319 static int op_cpu_kill(unsigned int cpu) in op_cpu_kill() argument
321 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_kill()
324 * If we have no means of synchronising with the dying CPU, then assume in op_cpu_kill()
331 return ops->cpu_kill(cpu); in op_cpu_kill()
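Editor's note: the comment in op_cpu_kill() explains that when a backend offers no way to synchronise with the dying CPU, the caller simply assumes the CPU is dead. That "optional hook, optimistic default" pattern is sketched below; the names are hypothetical and the return convention (0 for success) is an assumption based on the listing.

/* sketch_optional_kill.c — optional hook with an optimistic default */
#include <stdio.h>

struct cpu_ops {
	int (*cpu_kill)(unsigned int cpu);	/* optional */
};

static int kill_cpu(const struct cpu_ops *ops, unsigned int cpu)
{
	if (!ops->cpu_kill)
		return 0;	/* no way to synchronise: assume it died */
	return ops->cpu_kill(cpu);
}

static int noisy_kill(unsigned int cpu)
{
	printf("confirmed cpu%u is dead\n", cpu);
	return 0;
}

int main(void)
{
	struct cpu_ops with = { .cpu_kill = noisy_kill };
	struct cpu_ops without = { 0 };

	printf("with hook:    %d\n", kill_cpu(&with, 1));
	printf("without hook: %d\n", kill_cpu(&without, 1));
	return 0;
}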
335 * Called on the thread which is asking for a CPU to be shutdown after the
338 void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) in arch_cpuhp_cleanup_dead_cpu() argument
342 pr_debug("CPU%u: shutdown\n", cpu); in arch_cpuhp_cleanup_dead_cpu()
345 * Now that the dying CPU is beyond the point of no return w.r.t. in arch_cpuhp_cleanup_dead_cpu()
350 err = op_cpu_kill(cpu); in arch_cpuhp_cleanup_dead_cpu()
352 pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err); in arch_cpuhp_cleanup_dead_cpu()
356 * Called from the idle thread for the CPU which has been shutdown.
361 unsigned int cpu = smp_processor_id(); in cpu_die() local
362 const struct cpu_operations *ops = get_cpu_ops(cpu); in cpu_die()
368 /* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */ in cpu_die()
372 * Actually shutdown the CPU. This must never fail. The specific hotplug in cpu_die()
374 * no dirty lines are lost in the process of shutting down the CPU. in cpu_die()
376 ops->cpu_die(cpu); in cpu_die()
382 static void __cpu_try_die(int cpu) in __cpu_try_die() argument
385 const struct cpu_operations *ops = get_cpu_ops(cpu); in __cpu_try_die()
388 ops->cpu_die(cpu); in __cpu_try_die()
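Editor's note: the cpu_die() comments above stress that ops->cpu_die() "must never fail" and never return; the caller treats a return as a bug. The sketch below models only that contract in userspace, with _exit() standing in for the execution context disappearing. Everything here is an illustrative assumption, not the kernel's behaviour in detail.

/* sketch_cpu_die.c — "the die hook must never return" */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct cpu_ops {
	void (*cpu_die)(unsigned int cpu);
};

static void power_off(unsigned int cpu)
{
	printf("cpu%u: powering off, this call never returns\n", cpu);
	_exit(0);			/* the execution context simply ends */
}

static void cpu_die(const struct cpu_ops *ops, unsigned int cpu)
{
	ops->cpu_die(cpu);
	/* Reaching this point means the hook returned: treat it as fatal. */
	fprintf(stderr, "cpu%u: cpu_die() returned, aborting\n", cpu);
	abort();
}

int main(void)
{
	struct cpu_ops ops = { .cpu_die = power_off };

	cpu_die(&ops, 2);		/* does not return */
	printf("not reached\n");
	return 0;
}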
393 * Kill the calling secondary CPU, early in bringup before it is turned
398 int cpu = smp_processor_id(); in cpu_die_early() local
400 pr_crit("CPU%d: will not boot\n", cpu); in cpu_die_early()
402 /* Mark this CPU absent */ in cpu_die_early()
403 set_cpu_present(cpu, 0); in cpu_die_early()
404 rcu_report_dead(cpu); in cpu_die_early()
408 __cpu_try_die(cpu); in cpu_die_early()
419 pr_info("CPU: All CPU(s) started at EL2\n"); in hyp_mode_check()
422 "CPU: CPUs started in inconsistent modes"); in hyp_mode_check()
424 pr_info("CPU: All CPU(s) started at EL1\n"); in hyp_mode_check()
443 * The runtime per-cpu areas have been allocated by in smp_prepare_boot_cpu()
444 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be in smp_prepare_boot_cpu()
445 * freed shortly, so we must move over to the runtime per-cpu area. in smp_prepare_boot_cpu()
451 * We now know enough about the boot CPU to apply the in smp_prepare_boot_cpu()
469 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
472 static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) in is_mpidr_duplicate() argument
476 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) in is_mpidr_duplicate()
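Editor's note: is_mpidr_duplicate() above is a linear scan of the logical map entries filled in so far. A small self-contained sketch of the same check follows; the hardware IDs are invented, and where the listing's loop starts at index 1 (the boot CPU is validated separately), this sketch scans from 0 for simplicity.

/* sketch_dup_hwid.c — duplicate hardware-ID scan over a logical map */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		8
#define INVALID_HWID	UINT64_MAX

static uint64_t logical_map[NR_CPUS] = {
	0x000, 0x001, 0x100, INVALID_HWID,
	INVALID_HWID, INVALID_HWID, INVALID_HWID, INVALID_HWID,
};

static bool is_duplicate(unsigned int cpu, uint64_t hwid)
{
	/* Only slots below "cpu" have been populated so far. */
	for (unsigned int i = 0; i < cpu && i < NR_CPUS; i++)
		if (logical_map[i] == hwid)
			return true;
	return false;
}

int main(void)
{
	printf("0x100 duplicate? %d\n", is_duplicate(3, 0x100));	/* 1 */
	printf("0x101 duplicate? %d\n", is_duplicate(3, 0x101));	/* 0 */
	return 0;
}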
483 * Initialize cpu operations for a logical cpu and
486 static int __init smp_cpu_setup(int cpu) in smp_cpu_setup() argument
490 if (init_cpu_ops(cpu)) in smp_cpu_setup()
493 ops = get_cpu_ops(cpu); in smp_cpu_setup()
494 if (ops->cpu_init(cpu)) in smp_cpu_setup()
497 set_cpu_possible(cpu, true); in smp_cpu_setup()
508 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) in acpi_cpu_get_madt_gicc() argument
510 return &cpu_madt_gicc[cpu]; in acpi_cpu_get_madt_gicc()
526 pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); in acpi_map_gic_cpu_interface()
531 pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); in acpi_map_gic_cpu_interface()
536 pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid); in acpi_map_gic_cpu_interface()
540 /* Check if GICC structure of boot CPU is available in the MADT */ in acpi_map_gic_cpu_interface()
543 pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n", in acpi_map_gic_cpu_interface()
555 /* map the logical cpu id to cpu MPIDR */ in acpi_map_gic_cpu_interface()
561 * Set-up the ACPI parking protocol cpu entries in acpi_map_gic_cpu_interface()
566 * initialize the cpu if the parking protocol is in acpi_map_gic_cpu_interface()
604 * In ACPI, SMP and CPU NUMA information is provided in separate in acpi_parse_and_init_cpus()
607 * Thus, it is simpler to first create the cpu logical map through in acpi_parse_and_init_cpus()
621 * Enumerate the possible CPU set from the device tree and build the
622 * cpu logical map array containing MPIDR values related to logical
636 pr_err("%pOF: duplicate cpu reg properties in the DT\n", in of_parse_and_init_cpus()
642 * The numbering scheme requires that the boot CPU in of_parse_and_init_cpus()
649 pr_err("%pOF: duplicate boot cpu reg property in DT\n", in of_parse_and_init_cpus()
659 * initialized and the boot cpu doesn't need in of_parse_and_init_cpus()
661 * incrementing cpu. in of_parse_and_init_cpus()
669 pr_debug("cpu logical map 0x%llx\n", hwid); in of_parse_and_init_cpus()
679 * Enumerate the possible CPU set from the device tree or ACPI and build the
680 * cpu logical map array containing MPIDR values related to logical
697 pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); in smp_init_cpus()
703 * the cpus so that cpu processor description entries (DT cpu nodes in smp_init_cpus()
704 * and ACPI MADT entries) can be retrieved by matching the cpu hwid in smp_init_cpus()
706 * If the cpu set-up fails, invalidate the cpu_logical_map entry. in smp_init_cpus()
720 unsigned int cpu; in smp_prepare_cpus() local
742 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
744 per_cpu(cpu_number, cpu) = cpu; in smp_prepare_cpus()
746 if (cpu == smp_processor_id()) in smp_prepare_cpus()
749 ops = get_cpu_ops(cpu); in smp_prepare_cpus()
753 err = ops->cpu_prepare(cpu); in smp_prepare_cpus()
757 set_cpu_present(cpu, true); in smp_prepare_cpus()
758 numa_store_cpu_info(cpu); in smp_prepare_cpus()
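Editor's note: the smp_prepare_cpus() loop above walks every possible CPU, skips the one it is running on, and only marks a CPU present once its backend's prepare hook succeeds. A minimal model of that loop, with an invented prepare hook that fails for one CPU:

/* sketch_prepare.c — mark present only after a successful prepare hook */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CPUS 4

struct cpu_ops {
	int (*cpu_prepare)(unsigned int cpu);	/* optional */
};

static int demo_prepare(unsigned int cpu)
{
	return cpu == 2 ? -ENODEV : 0;		/* pretend cpu2 is missing */
}

static struct cpu_ops ops = { .cpu_prepare = demo_prepare };
static bool cpu_present[MAX_CPUS];

int main(void)
{
	const unsigned int self = 0;		/* the boot CPU */

	cpu_present[self] = true;		/* the boot CPU is already present */

	for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++) {
		if (cpu == self)
			continue;
		if (!ops.cpu_prepare || ops.cpu_prepare(cpu))
			continue;		/* leave it not-present */
		cpu_present[cpu] = true;
	}

	for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++)
		printf("cpu%u present: %d\n", cpu, cpu_present[cpu]);
	return 0;
}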
765 [IPI_CPU_STOP] = "CPU stop interrupts",
766 [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
769 [IPI_WAKEUP] = "CPU wake-up interrupts",
778 unsigned int cpu, i; in arch_show_interrupts() local
783 for_each_online_cpu(cpu) in arch_show_interrupts()
784 seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu)); in arch_show_interrupts()
797 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
799 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); in arch_send_call_function_single_ipi()
839 static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) in ipi_cpu_crash_stop() argument
842 crash_save_cpu(regs, cpu); in ipi_cpu_crash_stop()
850 __cpu_try_die(cpu); in ipi_cpu_crash_stop()
864 unsigned int cpu = smp_processor_id(); in do_handle_IPI() local
884 ipi_cpu_crash_stop(cpu, get_irq_regs()); in do_handle_IPI()
904 WARN_ONCE(!acpi_parking_protocol_valid(cpu), in do_handle_IPI()
905 "CPU%u: Wake-up IPI outside the ACPI parking protocol\n", in do_handle_IPI()
906 cpu); in do_handle_IPI()
911 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); in do_handle_IPI()
931 static void ipi_setup(int cpu) in ipi_setup() argument
943 static void ipi_teardown(int cpu) in ipi_teardown() argument
975 /* Setup the boot CPU immediately */ in set_smp_ipi_range()
979 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
981 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); in arch_smp_send_reschedule()
992 * The number of CPUs online, not counting this CPU (which may not be
1046 * If this cpu is the only one alive at this point in time, online or in crash_smp_send_stop()