xref: /openbmc/linux/arch/x86/kernel/apic/ipi.c (revision 01363d4f)
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"

DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

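/*
 * "no_ipi_broadcast=N" early parameter: a non-zero N disables the IPI
 * shorthand broadcast optimization. E.g. booting with
 * "no_ipi_broadcast=1" on the kernel command line forces per-CPU
 * (mask based) IPI delivery.
 */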
static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

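/*
 * Re-evaluate whether the IPI shorthand broadcast static key may be
 * enabled. Expected to be invoked whenever the set of online CPUs
 * changes (e.g. CPU hotplug and SMT control); the actual call sites
 * live outside this file.
 */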
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

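/*
 * Send @vector to all online CPUs except the calling one. Uses the
 * ALLBUT shorthand when the static key allows it, otherwise falls back
 * to the more expensive per-CPU mask variant.
 */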
void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

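/*
 * If @mask plus the calling CPU covers all online CPUs, the cheaper
 * ALL/ALLBUT shorthands can be used: ALL when the calling CPU is part
 * of @mask, ALLBUT when it is not. Any other mask has to take the
 * mask based slow path.
 */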
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */

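/*
 * ICR2 holds the xAPIC destination field: bits 56-63 of the full
 * 64-bit ICR, i.e. bits 24-31 of the high ICR word.
 */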
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

/*
 * This is safe against interruption because it only writes the lower 32
 * bits of the APIC_ICR register. The destination field is ignored for
 * shorthand IPIs.
 *
 *  wait_icr_idle()
 *  write(ICR2, dest)
 *  NMI
 *	wait_icr_idle()
 *	write(ICR)
 *	wait_icr_idle()
 *  write(ICR)
 *
 * This function does not need to disable interrupts as there is no ICR2
 * interaction. The memory write is direct except when the machine is
 * affected by the 11AP Pentium erratum, which turns the plain write into
 * an XCHG operation.
 */
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Wait for the previous ICR command to complete.  Use
	 * safe_apic_wait_icr_idle() for the NMI vector as there have been
	 * issues where otherwise the system hangs when the panic CPU tries
	 * to stop the others before launching the kdump kernel.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/* Destination field (ICR2) and the destination mode are ignored */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, 0));
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int dest_mask, int vector,
				   unsigned int dest_mode)
{
	/* See comment in __default_send_IPI_shortcut() */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/* Set the IPI destination field in the ICR */
	native_apic_mem_write(APIC_ICR2, __prepare_ICR2(dest_mask));
	/* Send it with the proper destination mode */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, dest_mode));
}

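/*
 * The callers below disable interrupts around the ICR2/ICR write pair:
 * unlike the shorthand path above, the destination field in ICR2
 * matters here and must not be clobbered by an interrupt handler which
 * sends an IPI of its own.
 */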
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned long cpu;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32
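/*
 * The logical mode variants below assume the default 32-bit flat
 * logical addressing, where CPU N owns bit N of the 8-bit logical
 * destination field, hence the "1U << cpu" encoding.
 */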
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);
	for_each_cpu(cpu, mask)
		__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

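/*
 * Reverse map a hardware APIC ID to its Linux CPU number by a linear
 * scan of the per-CPU APIC ID table. Returns -1 if no CPU matches.
 */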
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

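/*
 * Variant of smp_processor_id() which does not rely on per-CPU state
 * being usable: the CPU number is derived from the hardware APIC ID.
 * Falls back to CPU 0 when the APIC is unusable or the ID cannot be
 * mapped, which is presumably good enough for the crash style paths
 * that need it.
 */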
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = read_apic_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif