// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include "local.h"

/*
 * Encode @mask as the destination field of the upper half of the ICR
 * (the APIC_ICR2 register in xAPIC mode).
 */
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

/*
 * Spin until the local APIC clears the delivery-status (busy) bit in the
 * ICR, i.e. until the previously sent IPI has been accepted by the bus.
 */
static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

/*
 * Send an IPI using one of the ICR shorthand destinations
 * (self / all-including-self / all-excluding-self).
 */
void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * of the value read we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 *
 * Note the ordering contract: ICR2 (destination) must be written before the
 * ICR write below, because the ICR write is what actually sends the IPI.
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle. NMIs use the interrupt-safe wait variant since an
	 * NMI can be raised from contexts where the plain spin is not safe.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * Send @vector to a single CPU using physical destination mode.
 * Interrupts are disabled across the two-register ICR write sequence so it
 * cannot be interleaved with another IPI send on this CPU.
 */
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

/*
 * Send @vector to every CPU in @mask, one unicast IPI at a time,
 * using physical destination mode.
 */
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Like default_send_IPI_mask_sequence_phys(), but skips the sending CPU
 * even if it is part of @mask.
 */
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

/*
 * Send @vector to every CPU in @mask as a sequence of unicast IPIs,
 * using logical destination mode.
 */
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

/*
 * Like default_send_IPI_mask_sequence_logical(), but skips the sending CPU
 * even if it is part of @mask.
 */
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 *
 * Sends one IPI whose logical destination is the raw bitmap of the first
 * word of @cpumask — only valid where every online CPU's logical APIC ID
 * fits in that word (hence the WARN_ON against cpu_online_mask).
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

/*
 * Send @vector to all online CPUs except the current one, preferring the
 * ICR all-but-self shorthand when broadcast is allowed and the vector is
 * not an NMI.
 */
void default_send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (num_online_cpus() < 2)
		return;

	if (no_broadcast || vector == NMI_VECTOR) {
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
	}
}

/*
 * Send @vector to all online CPUs including the current one, preferring
 * the ICR all-including-self shorthand when broadcast is allowed and the
 * vector is not an NMI.
 */
void default_send_IPI_all(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR) {
		apic->send_IPI_mask(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
	}
}

/* Send @vector to the current CPU via the self shorthand. */
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/* must come after the send_IPI functions above for inlining */

/*
 * Reverse-map an APIC ID to its logical CPU number by scanning the
 * per-cpu table; returns -1 if no possible CPU has that APIC ID.
 */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

/*
 * Determine the current CPU number without relying on per-cpu state —
 * usable from crash/early contexts. Falls back to 0 whenever the APIC
 * is unusable or the lookup fails.
 */
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif