xref: /openbmc/linux/arch/x86/kernel/apic/ipi.c (revision d0a7166b)
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include "local.h"

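/*
 * Static key which gates the use of IPI shorthands. It is enabled by
 * apic_smt_update() only when broadcasting is known to be safe and is
 * checked by the IPI senders below.
 */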
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI	(1)
#else
#define DEFAULT_SEND_IPI	(0)
#endif

static int apic_ipi_shorthand_off __ro_after_init = DEFAULT_SEND_IPI;

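/*
 * "no_ipi_broadcast=" kernel command line option. For example, booting
 * with "no_ipi_broadcast=1" keeps apic_ipi_shorthand_off set, so the
 * shorthand static key is never enabled.
 */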
static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

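/* Log the chosen mode once, after command line parsing is done. */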
static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

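/*
 * Re-evaluate whether IPI shorthands may be used. The decision depends
 * on the command line, the number of online CPUs and the booted-once
 * mask, so this needs to be re-run whenever those change.
 */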
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

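/*
 * Send @vector to all online CPUs but the current one, using the
 * shorthand when the static key says it is safe and falling back to an
 * explicit destination mask otherwise.
 */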
void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

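	/*
	 * If the temporary mask cannot be allocated, simply send to the
	 * given mask; the shorthand path below is only an optimization.
	 */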
	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	__cpumask_clear_cpu(smp_processor_id(), allbutself);

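	/*
	 * Use the cheaper allbutself variant only when @mask covers every
	 * other online CPU and no CPU is still in the process of being
	 * brought up (online and callout masks agree).
	 */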
	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}

#endif /* CONFIG_SMP */

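/*
 * Build the ICR2 value: SET_APIC_DEST_FIELD() shifts the destination
 * (a physical APIC ID or a logical mask) into the top byte of ICR2.
 */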
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

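/*
 * Send an IPI to one of the shorthand destinations (self, all including
 * self, all excluding self). Only ICR needs to be written; the NMI case
 * uses the timeout-based ICR wait so a wedged ICR cannot hang the
 * sender.
 */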
void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for the ICR to be idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}


/*
 * This is used to send an IPI with no shorthand notation (the destination
 * is specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for the ICR to be idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * Prepare the target chip field.
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * Program the ICR.
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

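/*
 * Interrupts are disabled around the ICR2/ICR write pair below so that
 * an interrupt handler sending its own IPI cannot tear the sequence.
 */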
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: the first word of the cpumask
 * is passed straight through as the logical destination bitmap, so every
 * target CPU must have its bit in that word.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this
	 * case.
	 */
	if (num_online_cpus() < 2)
		return;

	if (apic_ipi_shorthand_off || vector == NMI_VECTOR) {
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
	}
}

void default_send_IPI_all(int vector)
{
	if (apic_ipi_shorthand_off || vector == NMI_VECTOR) {
		apic->send_IPI_mask(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
	}
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/* Must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

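/*
 * Determine the current CPU number from the hardware APIC ID rather
 * than from per-CPU state, falling back to CPU 0 if the APIC is absent
 * or the ID cannot be mapped.
 */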
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_X86_32 */