// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include "local.h"

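/*
 * Build the value for the ICR2 (destination) register: the destination
 * (physical APIC ID or logical mask) lives in the top byte, bits 56-63
 * of the 64-bit ICR, which SET_APIC_DEST_FIELD shifts into place.
 */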
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

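/*
 * Spin until the Delivery Status bit (APIC_ICR_BUSY) in the ICR clears,
 * i.e. until the local APIC has dispatched the previous IPI.
 */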
static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

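/*
 * Send an IPI using one of the shorthand destinations (self, all, or
 * all-but-self); the vector selects the handler on the receiving CPUs.
 */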
void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read, we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for the ICR to become idle. For an NMI the safe variant is
	 * used: it gives up after a timeout instead of spinning forever.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}


/*
 * This is used to send an IPI with no shorthand notation (the destination
 * is specified in bits 56 to 63 of the ICR).
 */
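/*
 * Note: the back-to-back ICR2/ICR writes below are not atomic, so the
 * callers in this file run the sequence with interrupts disabled (an
 * observation from the call sites, not a documented contract).
 */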
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

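/*
 * Send an IPI to a single CPU in physical destination mode. Interrupts
 * are disabled around the call so the ICR2/ICR write sequence cannot be
 * interleaved with another IPI from an interrupt handler.
 */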
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

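/*
 * As above, but skip the sending CPU so the IPI reaches every CPU in the
 * mask except ourselves.
 */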
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

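/*
 * Logical-mode counterpart of the physical unicast loop above, using the
 * early per-CPU logical APIC IDs as destinations.
 */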
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: in flat logical destination
 * mode the destination field is a bitmask of at most eight CPUs, so the
 * first word of the cpumask is all that can ever be addressed.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, so avoid sending IPIs in this case.
	 */
	if (num_online_cpus() < 2)
		return;

	if (no_broadcast || vector == NMI_VECTOR) {
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
	}
}

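/*
 * Broadcast to all CPUs including self. As above, the shorthand is
 * avoided when broadcasts are disabled or the vector is an NMI, in
 * which case the mask-based path is used instead.
 */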
void default_send_IPI_all(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR) {
		apic->send_IPI_mask(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
	}
}

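/*
 * IPI to the sending CPU itself, via the SELF shorthand.
 */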
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/* must come after the send_IPI functions above for inlining */
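/*
 * Reverse-map a hardware APIC ID to a Linux CPU number by a linear scan
 * of the per-CPU APIC ID table; returns -1 if no CPU matches.
 */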
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

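/*
 * A variant of smp_processor_id() that derives the CPU number from the
 * hardware APIC ID, presumably so it remains usable in contexts (e.g.
 * crash or NMI paths) where the usual per-CPU lookup cannot be trusted.
 * Falls back to CPU 0 whenever the APIC cannot provide an answer.
 */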
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif