/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

static void smp_tune_scheduling (void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;	/* kB */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
	 * the cache size)
	 */
	if (!cpu_khz)
		return;

	cachesize = cd->linesz * cd->sets * cd->ways;
}

extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;
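
/*
 * call_data points at the descriptor for the cross-CPU call currently in
 * flight.  struct call_data_struct itself is defined in <asm/smp.h>; the
 * sketch below only shows what its layout must roughly look like, judging
 * by how the fields are used in this file, and is not the authoritative
 * definition:
 *
 *	struct call_data_struct {
 *		void (*func)(void *);	   function to run on each CPU
 *		void *info;		   opaque argument handed to func
 *		atomic_t started;	   count of CPUs that picked up the call
 *		atomic_t finished;	   count of CPUs that completed func
 *		int wait;		   caller spins on finished when set
 *	};
 */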

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
	int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&smp_call_lock);

	return 0;
}

void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
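
/*
 * Illustrative only: a minimal sketch of how smp_call_function() is meant
 * to be used.  The helpers example_flush() and example_flush_all() are
 * hypothetical, not part of this file.  Because the callback runs on every
 * CPU except the caller, the caller invokes it locally as well, with
 * preemption disabled so it cannot migrate between the cross call and the
 * local call (this is the pattern the flush_tlb_*() functions below use):
 *
 *	static void example_flush(void *info)
 *	{
 *		... per-CPU work, fast and non-blocking ...
 *	}
 *
 *	static void example_flush_all(void *arg)
 *	{
 *		preempt_disable();
 *		smp_call_function(example_flush, arg, 1, 1);
 *		example_flush(arg);
 *		preempt_enable();
 *	}
 */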

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	prom_prepare_cpus(max_cpus);
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
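
/*
 * The flush_tlb_*() implementations below all follow the shape sketched
 * here (a summary of the pattern, not compiled code):
 *
 *	preempt_disable();
 *	if (mm is shared, or is not the current mm)
 *		smp_call_function(local flush helper, args, 1, 1);
 *	else
 *		for every other online cpu i
 *			cpu_context(i, mm) = 0;	   force a new context at switch_mm
 *	do the flush locally;
 *	preempt_enable();
 */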

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
	local_flush_tlb_one(vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);