/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	/* Tell the boot CPU we have arrived; __cpu_up() is spinning on this. */
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.  (Currently ignored by
 *		this implementation.)
 *  <wait>	If true, wait until the function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already finished executing it.
 *
 * You must not call this function with interrupts disabled, nor from a
 * hardware interrupt handler or a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function(void (*func) (void *info), void *info, int retry,
	int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for_each_online_cpu(i)
		if (i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
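/*
 * Usage sketch (illustrative only; poke_cpu below is a hypothetical
 * callback, not part of this file).  Because <func> runs from IPI
 * context on the remote CPUs, it must neither block nor re-enter
 * smp_call_function():
 *
 *	static void poke_cpu(void *info)
 *	{
 *		printk("CPU %d responding\n", smp_processor_id());
 *	}
 *
 *	smp_call_function(poke_cpu, NULL, 1, 1);  / * all other CPUs, wait * /
 *	poke_cpu(NULL);                           / * then once locally * /
 */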
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The processor will go to start_secondary() and set its online
	 * flag.  The following is purely to make sure Linux can schedule
	 * processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
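/*
 * The bring-up handshake implemented above, as a sketch of the control
 * flow (the prom_* entry points are platform-specific hooks):
 *
 * Boot CPU                            Secondary CPU
 * __cpu_up()
 *   fork_idle()
 *   prom_boot_secondary()  ------->   start_secondary()
 *   spin on cpu_callin_map              per_cpu_trap_init() etc.
 *                                       cpu_set(cpu, cpu_callin_map)
 *   cpu_set(cpu, cpu_online_map)        cpu_idle()
 */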
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function() for use by the TLB functions:
 *
 *  o No return value
 *  o collapses to a normal function call on UP kernels
 *  o collapses to a normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or when pte attributes are changing.  For single
 * threaded address spaces, a new context is obtained on the current cpu,
 * and the TLB context on other cpus is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other cpus.
 * For multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
	} else {
		int i;

		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
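/*
 * Note on the else-branch above (a sketch of the mechanism; the actual
 * test lives in asm/mmu_context.h): zeroing cpu_context(i, mm) marks the
 * ASID stale, so the next switch_mm() on cpu i fails the ASID version
 * check and calls get_new_mmu_context(), which retires the old
 * translations without any cross-CPU interrupt being sent.
 */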
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		int i;

		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
	} else {
		int i;

		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);