/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/const.h>

/* Values for secondary_data.status */
#define CPU_STUCK_REASON_SHIFT		(8)
#define CPU_BOOT_STATUS_MASK		((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)

#define CPU_MMU_OFF			(-1)
#define CPU_BOOT_SUCCESS		(0)
/* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
#define CPU_KILL_ME			(1)
/* The cpu couldn't die gracefully and is looping in the kernel */
#define CPU_STUCK_IN_KERNEL		(2)
/* Fatal system error detected by secondary CPU, crash the system */
#define CPU_PANIC_KERNEL		(3)

#define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
#define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)

#ifndef __ASSEMBLY__

#include <asm/percpu.h>

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

/*
 * We don't use this_cpu_read(cpu_number) as that has implicit writes to
 * preempt_count, and associated (compiler) barriers, that we'd like to avoid
 * the expense of. If we're preemptible, the value can be stale at use anyway.
 * And we can't use this_cpu_ptr() either, as that winds up recursing back
 * here under CONFIG_DEBUG_PREEMPT=y.
 */
#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))

/*
 * Logical CPU mapping.
 */
extern u64 __cpu_logical_map[NR_CPUS];
extern u64 cpu_logical_map(int cpu);

static inline void set_cpu_logical_map(int cpu, u64 hwid)
{
	__cpu_logical_map[cpu] = hwid;
}

struct seq_file;

/*
 * generate IPI list text
 */
extern void show_ipi_list(struct seq_file *p, int prec);

/*
 * Called from C code, this handles an IPI.
 */
extern void handle_IPI(int ipinr, struct pt_regs *regs);

/*
 * Discover the set of possible CPUs and determine their
 * SMP operations.
 */
extern void smp_init_cpus(void);

/*
 * Provide a function to raise an IPI cross call on CPUs in callmap.
 */
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));

extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);

/*
 * Called from the secondary holding pen, this is the secondary CPU entry point.
 */
asmlinkage void secondary_start_kernel(void);

/*
 * Initial data for bringing up a secondary CPU.
 * @stack  - sp for the secondary CPU
 * @status - Result passed back from the secondary CPU to
 *           indicate failure.
 * @task   - Initial task to run on the secondary CPU (its idle thread).
 */
struct secondary_data {
	void *stack;
	struct task_struct *task;
	long status;
};

extern struct secondary_data secondary_data;
extern long __early_cpu_boot_status;
extern void secondary_entry(void);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
#else
static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	BUILD_BUG();
}
#endif

extern int __cpu_disable(void);

extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
extern void cpu_die_early(void);

static inline void cpu_park_loop(void)
{
	for (;;) {
		wfe();
		wfi();
	}
}

static inline void update_cpu_boot_status(int val)
{
	WRITE_ONCE(secondary_data.status, val);
	/* Ensure the visibility of the status update */
	dsb(ishst);
}

/*
 * The calling secondary CPU has detected serious configuration mismatch,
 * which calls for a kernel panic. Update the boot status and park the calling
 * CPU.
 */
static inline void cpu_panic_kernel(void)
{
	update_cpu_boot_status(CPU_PANIC_KERNEL);
	cpu_park_loop();
}

/*
 * If a secondary CPU enters the kernel but fails to come online
 * (e.g. due to mismatched features) and cannot exit the kernel,
 * we increment cpus_stuck_in_kernel and leave the CPU in a
 * quiescent loop within the kernel text. The memory containing
 * this loop must not be re-used for anything else as the 'stuck'
 * core is executing it.
 *
 * This function is used to inhibit features like kexec and hibernate.
 */
bool cpus_are_stuck_in_kernel(void);

extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);

#endif /* ifndef __ASSEMBLY__ */

#endif /* ifndef __ASM_SMP_H */
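
/*
 * Illustrative sketch (not part of the upstream header): how a boot status
 * value reported through secondary_data.status / __early_cpu_boot_status
 * could be decoded by the boot CPU, assuming the bit layout defined above.
 * The low CPU_STUCK_REASON_SHIFT bits carry the result code (CPU_MMU_OFF,
 * CPU_BOOT_SUCCESS, CPU_KILL_ME, CPU_STUCK_IN_KERNEL or CPU_PANIC_KERNEL);
 * the remaining bits carry an optional CPU_STUCK_REASON_* flag. The helper
 * name below is hypothetical and the block is compiled out; it only
 * demonstrates the encoding, not the kernel's actual error handling.
 */
#if 0	/* example only */
static void report_secondary_boot_status(unsigned int cpu, long status)
{
	switch (status & CPU_BOOT_STATUS_MASK) {
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in the kernel\n", cpu);
		/* Reason flags live above CPU_STUCK_REASON_SHIFT */
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
		if (status & CPU_STUCK_REASON_NO_GRAN)
			pr_crit("CPU%u: does not support the kernel's translation granule\n", cpu);
		break;
	case CPU_PANIC_KERNEL:
		panic("CPU%u detected unsupported configuration\n", cpu);
	}
}
#endif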