/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
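/*
 * Illustrative sketch (not part of the original file): a platform
 * typically fills in a struct plat_smp_ops with its own hooks and
 * registers it early in boot.  Only the ops that are actually invoked
 * through mp_ops elsewhere in this file are shown, and the myboard_*
 * names are hypothetical.
 *
 *	static struct plat_smp_ops myboard_smp_ops = {
 *		.send_ipi_single	= myboard_send_ipi_single,
 *		.boot_secondary		= myboard_boot_secondary,
 *		.init_secondary		= myboard_init_secondary,
 *		.smp_finish		= myboard_smp_finish,
 *		.cpus_done		= myboard_cpus_done,
 *		.prepare_cpus		= myboard_prepare_cpus,
 *	};
 *
 *	register_smp_ops(&myboard_smp_ops);
 */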
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
		__cpu_name[smp_processor_id()] = __cpu_name[0];
	else
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_probe();
	cpu_report();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * Interrupts will be enabled in ->smp_finish(); enabling them too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();	/* Wait if available. */
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	synchronise_count_master(cpu);
	return 0;
}
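/*
 * Summary of the bring-up handshake implemented above (derived from this
 * file only): __cpu_up() on the boot CPU calls mp_ops->boot_secondary()
 * and then spins until the new CPU sets its bit in cpu_callin_map.  The
 * secondary runs start_secondary(), where it probes and calibrates
 * itself, marks itself online, sets that cpu_callin_map bit, and pairs
 * synchronise_count_slave() with the master's synchronise_count_master()
 * before entering the idle loop via cpu_startup_entry().
 */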
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
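/*
 * Note on the single-threaded fast path used by flush_tlb_mm(),
 * flush_tlb_range() and flush_tlb_page() above: clearing
 * cpu_context(cpu, mm) on the other CPUs does not touch their TLBs right
 * away.  It only invalidates the mm's context there, so switch_mm() has
 * to allocate a fresh context the next time the mm is scheduled on such
 * a CPU, at which point any stale translations become unreachable
 * without an IPI having been sent now.
 */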
static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);

}
EXPORT_SYMBOL(dump_send_ipi);
#endif
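/*
 * Usage sketch for dump_send_ipi() (an assumption, not shown in this
 * file): a crash/kdump path passes its per-CPU capture routine as
 * dump_ipi_callback, and each CPU that receives the SMP_DUMP IPI is
 * expected to invoke dump_ipi_function_ptr from its platform IPI
 * handler.  The crash_capture_cpu name below is hypothetical.
 *
 *	static void crash_capture_cpu(void *unused)
 *	{
 *		// hypothetical: save this CPU's state, then halt
 *	}
 *
 *	dump_send_ipi(crash_capture_cpu);
 */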