1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/cpumask.h>
4 #include <linux/delay.h>
5 #include <linux/smp.h>
6
7 #include <asm/io_apic.h>
8
9 #include "local.h"
10
/* Enabled when IPI shorthands (self/all/allbutself) are safe to use. */
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);
12
13 #ifdef CONFIG_SMP
/* Set via the "no_ipi_broadcast=" command line option; nonzero disables shorthands. */
static int apic_ipi_shorthand_off __ro_after_init;
15
/*
 * Parse "no_ipi_broadcast=<int>" into apic_ipi_shorthand_off.
 * Returns 1 to mark the option as consumed by a __setup() handler.
 */
static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);
22
print_ipi_mode(void)23 static int __init print_ipi_mode(void)
24 {
25 pr_info("IPI shorthand broadcast: %s\n",
26 apic_ipi_shorthand_off ? "disabled" : "enabled");
27 return 0;
28 }
29 late_initcall(print_ipi_mode);
30
apic_smt_update(void)31 void apic_smt_update(void)
32 {
33 /*
34 * Do not switch to broadcast mode if:
35 * - Disabled on the command line
36 * - Only a single CPU is online
37 * - Not all present CPUs have been at least booted once
38 *
39 * The latter is important as the local APIC might be in some
40 * random state and a broadcast might cause havoc. That's
41 * especially true for NMI broadcasting.
42 */
43 if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
44 !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
45 static_branch_disable(&apic_use_ipi_shorthand);
46 } else {
47 static_branch_enable(&apic_use_ipi_shorthand);
48 }
49 }
50
apic_send_IPI_allbutself(unsigned int vector)51 void apic_send_IPI_allbutself(unsigned int vector)
52 {
53 if (num_online_cpus() < 2)
54 return;
55
56 if (static_branch_likely(&apic_use_ipi_shorthand))
57 __apic_send_IPI_allbutself(vector);
58 else
59 __apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
60 }
61
62 /*
63 * Send a 'reschedule' IPI to another CPU. It goes straight through and
64 * wastes no time serializing anything. Worst case is that we lose a
65 * reschedule ...
66 */
native_smp_send_reschedule(int cpu)67 void native_smp_send_reschedule(int cpu)
68 {
69 if (unlikely(cpu_is_offline(cpu))) {
70 WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
71 return;
72 }
73 __apic_send_IPI(cpu, RESCHEDULE_VECTOR);
74 }
75
/* Kick @cpu to process its pending smp_call_function_single() work. */
void native_send_call_func_single_ipi(int cpu)
{
	__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}
80
/*
 * Send the CALL_FUNCTION IPI to all CPUs in @mask, using the 'all' or
 * 'allbutself' shorthand when @mask covers exactly the online CPUs
 * (modulo the sending CPU); otherwise fall back to a targeted mask send.
 */
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		/* Shorthands only apply when mask == online (+/- this CPU). */
		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			__apic_send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			__apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}
99
100 #endif /* CONFIG_SMP */
101
/* Build the ICR2 (upper half of the ICR) value: @mask goes into the
 * xAPIC destination field. */
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}
106
apic_mem_wait_icr_idle_timeout(void)107 u32 apic_mem_wait_icr_idle_timeout(void)
108 {
109 int cnt;
110
111 for (cnt = 0; cnt < 1000; cnt++) {
112 if (!(apic_read(APIC_ICR) & APIC_ICR_BUSY))
113 return 0;
114 inc_irq_stat(icr_read_retry_count);
115 udelay(100);
116 }
117 return APIC_ICR_BUSY;
118 }
119
/* Spin without a timeout until the ICR busy bit clears. */
void apic_mem_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
125
/*
 * This is safe against interruption because it only writes the lower 32
 * bits of the APIC_ICR register. The destination field is ignored for
 * short hand IPIs, so an interrupting IPI sequence which clobbers ICR2
 * (as in the scenario below) does no harm:
 *
 *  wait_icr_idle()
 *  write(ICR2, dest)
 *  NMI
 *          wait_icr_idle()
 *          write(ICR)
 *          wait_icr_idle()
 *  write(ICR)
 *
 * This function does not need to disable interrupts as there is no ICR2
 * interaction. The memory write is direct except when the machine is
 * affected by the 11AP Pentium erratum, which turns the plain write into
 * an XCHG operation.
 */
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Wait for the previous ICR command to complete. Use
	 * safe_apic_wait_icr_idle() for the NMI vector as there have been
	 * issues where otherwise the system hangs when the panic CPU tries
	 * to stop the others before launching the kdump kernel.
	 */
	if (unlikely(vector == NMI_VECTOR))
		apic_mem_wait_icr_idle_timeout();
	else
		apic_mem_wait_icr_idle();

	/* Destination field (ICR2) and the destination mode are ignored */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, 0));
}
160
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR). Callers must run with interrupts
 * disabled: the ICR2 write followed by the ICR write is not atomic, and an
 * interrupting IPI sequence could clobber ICR2 in between.
 */
void __default_send_IPI_dest_field(unsigned int dest_mask, int vector,
				   unsigned int dest_mode)
{
	/* See comment in __default_send_IPI_shortcut() */
	if (unlikely(vector == NMI_VECTOR))
		apic_mem_wait_icr_idle_timeout();
	else
		apic_mem_wait_icr_idle();

	/* Set the IPI destination field in the ICR */
	native_apic_mem_write(APIC_ICR2, __prepare_ICR2(dest_mask));
	/* Send it with the proper destination mode */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, dest_mode));
}
179
/*
 * Send @vector to a single CPU in physical destination mode. Interrupts
 * are disabled to protect the non-atomic ICR2/ICR write sequence.
 */
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}
189
default_send_IPI_mask_sequence_phys(const struct cpumask * mask,int vector)190 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
191 {
192 unsigned long flags;
193 unsigned long cpu;
194
195 local_irq_save(flags);
196 for_each_cpu(cpu, mask) {
197 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
198 cpu), vector, APIC_DEST_PHYSICAL);
199 }
200 local_irq_restore(flags);
201 }
202
default_send_IPI_mask_allbutself_phys(const struct cpumask * mask,int vector)203 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
204 int vector)
205 {
206 unsigned int cpu, this_cpu = smp_processor_id();
207 unsigned long flags;
208
209 local_irq_save(flags);
210 for_each_cpu(cpu, mask) {
211 if (cpu == this_cpu)
212 continue;
213 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
214 cpu), vector, APIC_DEST_PHYSICAL);
215 }
216 local_irq_restore(flags);
217 }
218
/*
 * Helper function for APICs which insist on cpumasks: route a
 * single-CPU IPI through the mask-based send operation.
 */
void default_send_IPI_single(int cpu, int vector)
{
	__apic_send_IPI_mask(cpumask_of(cpu), vector);
}
226
/* Broadcast @vector to all CPUs except self via the ALLBUT shorthand. */
void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}
231
/* Broadcast @vector to all CPUs including self via the ALLINC shorthand. */
void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
236
/* Send @vector to the local CPU via the SELF shorthand. */
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}
241
242 #ifdef CONFIG_X86_32
default_send_IPI_mask_sequence_logical(const struct cpumask * mask,int vector)243 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
244 {
245 unsigned long flags;
246 unsigned int cpu;
247
248 local_irq_save(flags);
249 for_each_cpu(cpu, mask)
250 __default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
251 local_irq_restore(flags);
252 }
253
default_send_IPI_mask_allbutself_logical(const struct cpumask * mask,int vector)254 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
255 int vector)
256 {
257 unsigned int cpu, this_cpu = smp_processor_id();
258 unsigned long flags;
259
260 local_irq_save(flags);
261 for_each_cpu(cpu, mask) {
262 if (cpu == this_cpu)
263 continue;
264 __default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
265 }
266 local_irq_restore(flags);
267 }
268
/*
 * Send @vector to all CPUs in @cpumask with a single IPI in logical
 * destination mode. Only the first BITS_PER_LONG CPUs are considered
 * (32bit flat logical mode: one destination bit per CPU).
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	/* Empty destination: nothing to send. */
	if (!mask)
		return;

	local_irq_save(flags);
	/* Catch attempts to IPI CPUs which are not online. */
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}
282
283 #ifdef CONFIG_SMP
convert_apicid_to_cpu(int apic_id)284 static int convert_apicid_to_cpu(int apic_id)
285 {
286 int i;
287
288 for_each_possible_cpu(i) {
289 if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
290 return i;
291 }
292 return -1;
293 }
294
safe_smp_processor_id(void)295 int safe_smp_processor_id(void)
296 {
297 int apicid, cpuid;
298
299 if (!boot_cpu_has(X86_FEATURE_APIC))
300 return 0;
301
302 apicid = read_apic_id();
303 if (apicid == BAD_APICID)
304 return 0;
305
306 cpuid = convert_apicid_to_cpu(apicid);
307
308 return cpuid >= 0 ? cpuid : 0;
309 }
310 #endif
311 #endif
312