// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 *						the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>	Do loops_per_jiffy
 *						calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>	fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 *						& cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *			scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *		smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
				     shadow_flush_counts);

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_CALL_FUNC_SINGLE	2
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);

extern void cpu_halt (void);

static void
stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}
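/*
 * Illustrative sketch (not part of this file's build, hence #if 0): a
 * minimal user-space model of the pending-ops protocol used by
 * send_IPI_single()/handle_IPI() above.  A sender atomically sets a bit
 * in the target's pending word; the receiver atomically swaps the word
 * to zero and decodes each set bit.  The stdatomic-based helpers and
 * demo_* names below are assumptions for illustration only, not kernel API.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_OP_CALL_FUNC	0
#define DEMO_OP_CPU_STOP	1

static atomic_ulong demo_pending;	/* stands in for per-CPU ipi_operation */

static void demo_send(int op)
{
	/* set_bit() analogue: mark the operation as pending */
	atomic_fetch_or(&demo_pending, 1UL << op);
	/* a real sender would now raise the IPI vector */
}

static void demo_handle(void)
{
	unsigned long ops;

	/* xchg() analogue: claim every operation that is currently pending */
	while ((ops = atomic_exchange(&demo_pending, 0)) != 0) {
		do {
			int which = __builtin_ctzl(ops);	/* lowest set bit */

			ops &= ~(1UL << which);
			printf("handling op %d\n", which);
		} while (ops);
	}
}

int main(void)
{
	demo_send(DEMO_OP_CALL_FUNC);
	demo_send(DEMO_OP_CPU_STOP);
	demo_handle();
	return 0;
}
#endif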
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(const struct cpumask *mask, int op)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;
	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}

void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}
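/*
 * Illustrative sketch (not compiled here, hence #if 0): a user-space
 * demonstration of the lost-update problem described in the comment in
 * smp_local_flush_tlb() above.  Two threads bump a shared counter; the
 * plain "++" version can lose increments when its load/increment/store
 * sequence is interleaved, while the atomic fetch-add cannot.  The
 * pthread-based scaffolding is an assumption for illustration only.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_LOOPS	1000000

static unsigned long plain_count;	/* racy "++" counter */
static atomic_ulong atomic_count;	/* fetch-add counter */

static void *demo_bump(void *arg)
{
	(void)arg;
	for (int i = 0; i < DEMO_LOOPS; i++) {
		plain_count++;				/* may lose updates */
		atomic_fetch_add(&atomic_count, 1);	/* never loses updates */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, demo_bump, NULL);
	pthread_create(&b, NULL, demo_bump, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* plain_count is typically below 2*DEMO_LOOPS; atomic_count is exact */
	printf("plain=%lu atomic=%lu expected=%d\n",
	       plain_count, atomic_load(&atomic_count), 2 * DEMO_LOOPS);
	return 0;
}
#endif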
#define FLUSH_DELAY	5	/* Usec backoff to eliminate excessive cacheline bouncing */

void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu(cpu, &cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

	mb();
	for_each_cpu(cpu, &cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu(cpu, &cpumask)
		while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
			udelay(FLUSH_DELAY);

	preempt_enable();
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	cpumask_var_t cpus;
	preempt_disable();
	/* this happens for the common case of a single-threaded fork():  */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}
	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
			mm, 1);
	} else {
		cpumask_copy(cpus, mm_cpumask(mm));
		smp_call_function_many(cpus,
			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
		free_cpumask_var(cpus);
	}
	local_irq_disable();
	local_finish_flush_tlb_mm(mm);
	local_irq_enable();
	preempt_enable();
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}
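/*
 * Illustrative sketch (not compiled here, hence #if 0): a user-space model
 * of the completion protocol used by smp_flush_tlb_cpumask() above.  The
 * requester snapshots the target's flush counter, asks the target to do a
 * local flush (which bumps the counter), then spins until the counter moves
 * past the snapshot.  The thread and request-flag scaffolding below is an
 * assumption for illustration; the kernel uses an IPI, not a shared flag.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

static atomic_uint flush_count;		/* stands in for local_tlb_flush_counts[cpu].count */
static atomic_int flush_requested;	/* stands in for the flush IPI */
static atomic_int demo_done;

static void *demo_target(void *arg)
{
	(void)arg;
	while (!atomic_load(&demo_done)) {
		if (atomic_exchange(&flush_requested, 0)) {
			/* "local flush", then publish completion via the counter */
			atomic_fetch_add(&flush_count, 1);
		}
		sched_yield();
	}
	return NULL;
}

int main(void)
{
	pthread_t target;
	unsigned int snapshot;

	pthread_create(&target, NULL, demo_target, NULL);

	snapshot = atomic_load(&flush_count);	/* counts[cpu] = ...count & 0xffff */
	atomic_store(&flush_requested, 1);	/* smp_send_local_flush_tlb(cpu) */
	while (atomic_load(&flush_count) == snapshot)
		sched_yield();			/* udelay(FLUSH_DELAY) backoff */

	printf("target completed its flush\n");
	atomic_store(&demo_done, 1);
	pthread_join(target, NULL);
	return 0;
}
#endif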