/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
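 *
 * The sequence below mirrors the handshake with __cpu_up() on the
 * master: probe and report the CPU, set up traps and clock events,
 * calibrate the delay loop, then report in via cpu_callin_map so the
 * master's busy-wait in __cpu_up() can complete.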
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave();

	cpu_idle();
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();	/* Wait if available. */
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
	synchronise_count_master();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
static struct task_struct *cpu_idle_thread[NR_CPUS];

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The processor will run start_secondary() and announce itself
	 * via cpu_callin_map; we mark it online below.  The following
	 * code is purely to make sure Linux can schedule processes on
	 * this slave.
	 */
	if (!cpu_idle_thread[cpu]) {
		idle = fork_idle(cpu);
		cpu_idle_thread[cpu] = idle;

		if (IS_ERR(idle))
			panic("Fork failed for CPU %d", cpu);
	} else {
		idle = cpu_idle_thread[cpu];
		init_idle(idle, cpu);
	}

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ...
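 *
 * setup_profiling_timer() accepts the requested multiplier but does not
 * act on it; MIPS keeps its default profiling rate.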
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or PTE attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and TLB
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc.).
 * Kanoj 07/00.
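 *
 * Example: for a single-threaded mm currently running on this CPU,
 * flush_tlb_mm() below simply zeroes cpu_context() on every other CPU
 * instead of sending IPIs; should the mm later run elsewhere,
 * switch_mm() sees the zero context and allocates a fresh ASID, which
 * implicitly discards any stale TLB entries for that address space.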
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
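
/*
 * Usage sketch (hypothetical platform code, not part of this file):
 * a board registers its SMP ops early in boot, before smp_init() runs.
 * The myboard_* names below are placeholders; only the hooks this file
 * actually invokes are shown.
 *
 *	static struct plat_smp_ops myboard_smp_ops = {
 *		.prepare_cpus	= myboard_prepare_cpus,
 *		.boot_secondary	= myboard_boot_secondary,
 *		.init_secondary	= myboard_init_secondary,
 *		.smp_finish	= myboard_smp_finish,
 *		.cpus_done	= myboard_cpus_done,
 *	};
 *
 *	void __init prom_init(void)
 *	{
 *		register_smp_ops(&myboard_smp_ops);
 *	}
 */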