1 /* 2 * arch/sh/kernel/smp.c 3 * 4 * SMP support for the SuperH processors. 5 * 6 * Copyright (C) 2002 - 2010 Paul Mundt 7 * Copyright (C) 2006 - 2007 Akio Idehara 8 * 9 * This file is subject to the terms and conditions of the GNU General Public 10 * License. See the file "COPYING" in the main directory of this archive 11 * for more details. 12 */ 13 #include <linux/err.h> 14 #include <linux/cache.h> 15 #include <linux/cpumask.h> 16 #include <linux/delay.h> 17 #include <linux/init.h> 18 #include <linux/spinlock.h> 19 #include <linux/mm.h> 20 #include <linux/module.h> 21 #include <linux/cpu.h> 22 #include <linux/interrupt.h> 23 #include <linux/sched.h> 24 #include <linux/atomic.h> 25 #include <linux/clockchips.h> 26 #include <asm/processor.h> 27 #include <asm/mmu_context.h> 28 #include <asm/smp.h> 29 #include <asm/cacheflush.h> 30 #include <asm/sections.h> 31 #include <asm/setup.h> 32 33 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 34 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 35 36 struct plat_smp_ops *mp_ops = NULL; 37 38 /* State of each CPU */ 39 DEFINE_PER_CPU(int, cpu_state) = { 0 }; 40 41 void register_smp_ops(struct plat_smp_ops *ops) 42 { 43 if (mp_ops) 44 printk(KERN_WARNING "Overriding previously set SMP ops\n"); 45 46 mp_ops = ops; 47 } 48 49 static inline void smp_store_cpu_info(unsigned int cpu) 50 { 51 struct sh_cpuinfo *c = cpu_data + cpu; 52 53 memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); 54 55 c->loops_per_jiffy = loops_per_jiffy; 56 } 57 58 void __init smp_prepare_cpus(unsigned int max_cpus) 59 { 60 unsigned int cpu = smp_processor_id(); 61 62 init_new_context(current, &init_mm); 63 current_thread_info()->cpu = cpu; 64 mp_ops->prepare_cpus(max_cpus); 65 66 #ifndef CONFIG_HOTPLUG_CPU 67 init_cpu_present(cpu_possible_mask); 68 #endif 69 } 70 71 void __init smp_prepare_boot_cpu(void) 72 { 73 unsigned int cpu = smp_processor_id(); 74 75 __cpu_number_map[0] = cpu; 76 __cpu_logical_map[0] = cpu; 77 78 set_cpu_online(cpu, true); 79 set_cpu_possible(cpu, true); 80 81 per_cpu(cpu_state, cpu) = CPU_ONLINE; 82 } 83 84 #ifdef CONFIG_HOTPLUG_CPU 85 void native_cpu_die(unsigned int cpu) 86 { 87 unsigned int i; 88 89 for (i = 0; i < 10; i++) { 90 smp_rmb(); 91 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 92 if (system_state == SYSTEM_RUNNING) 93 pr_info("CPU %u is now offline\n", cpu); 94 95 return; 96 } 97 98 msleep(100); 99 } 100 101 pr_err("CPU %u didn't die...\n", cpu); 102 } 103 104 int native_cpu_disable(unsigned int cpu) 105 { 106 return cpu == 0 ? -EPERM : 0; 107 } 108 109 void play_dead_common(void) 110 { 111 idle_task_exit(); 112 irq_ctx_exit(raw_smp_processor_id()); 113 mb(); 114 115 __this_cpu_write(cpu_state, CPU_DEAD); 116 local_irq_disable(); 117 } 118 119 void native_play_dead(void) 120 { 121 play_dead_common(); 122 } 123 124 int __cpu_disable(void) 125 { 126 unsigned int cpu = smp_processor_id(); 127 int ret; 128 129 ret = mp_ops->cpu_disable(cpu); 130 if (ret) 131 return ret; 132 133 /* 134 * Take this CPU offline. Once we clear this, we can't return, 135 * and we must not schedule until we're ready to give up the cpu. 136 */ 137 set_cpu_online(cpu, false); 138 139 /* 140 * OK - migrate IRQs away from this CPU 141 */ 142 migrate_irqs(); 143 144 /* 145 * Flush user cache and TLB mappings, and then remove this CPU 146 * from the vm mask set of all processes. 147 */ 148 flush_cache_all(); 149 #ifdef CONFIG_MMU 150 local_flush_tlb_all(); 151 #endif 152 153 clear_tasks_mm_cpumask(cpu); 154 155 return 0; 156 } 157 #else /* ... !CONFIG_HOTPLUG_CPU */ 158 int native_cpu_disable(unsigned int cpu) 159 { 160 return -ENOSYS; 161 } 162 163 void native_cpu_die(unsigned int cpu) 164 { 165 /* We said "no" in __cpu_disable */ 166 BUG(); 167 } 168 169 void native_play_dead(void) 170 { 171 BUG(); 172 } 173 #endif 174 175 asmlinkage void start_secondary(void) 176 { 177 unsigned int cpu = smp_processor_id(); 178 struct mm_struct *mm = &init_mm; 179 180 enable_mmu(); 181 atomic_inc(&mm->mm_count); 182 atomic_inc(&mm->mm_users); 183 current->active_mm = mm; 184 #ifdef CONFIG_MMU 185 enter_lazy_tlb(mm, current); 186 local_flush_tlb_all(); 187 #endif 188 189 per_cpu_trap_init(); 190 191 preempt_disable(); 192 193 notify_cpu_starting(cpu); 194 195 local_irq_enable(); 196 197 calibrate_delay(); 198 199 smp_store_cpu_info(cpu); 200 201 set_cpu_online(cpu, true); 202 per_cpu(cpu_state, cpu) = CPU_ONLINE; 203 204 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); 205 } 206 207 extern struct { 208 unsigned long sp; 209 unsigned long bss_start; 210 unsigned long bss_end; 211 void *start_kernel_fn; 212 void *cpu_init_fn; 213 void *thread_info; 214 } stack_start; 215 216 int __cpu_up(unsigned int cpu, struct task_struct *tsk) 217 { 218 unsigned long timeout; 219 220 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 221 222 /* Fill in data in head.S for secondary cpus */ 223 stack_start.sp = tsk->thread.sp; 224 stack_start.thread_info = tsk->stack; 225 stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ 226 stack_start.start_kernel_fn = start_secondary; 227 228 flush_icache_range((unsigned long)&stack_start, 229 (unsigned long)&stack_start + sizeof(stack_start)); 230 wmb(); 231 232 mp_ops->start_cpu(cpu, (unsigned long)_stext); 233 234 timeout = jiffies + HZ; 235 while (time_before(jiffies, timeout)) { 236 if (cpu_online(cpu)) 237 break; 238 239 udelay(10); 240 barrier(); 241 } 242 243 if (cpu_online(cpu)) 244 return 0; 245 246 return -ENOENT; 247 } 248 249 void __init smp_cpus_done(unsigned int max_cpus) 250 { 251 unsigned long bogosum = 0; 252 int cpu; 253 254 for_each_online_cpu(cpu) 255 bogosum += cpu_data[cpu].loops_per_jiffy; 256 257 printk(KERN_INFO "SMP: Total of %d processors activated " 258 "(%lu.%02lu BogoMIPS).\n", num_online_cpus(), 259 bogosum / (500000/HZ), 260 (bogosum / (5000/HZ)) % 100); 261 } 262 263 void smp_send_reschedule(int cpu) 264 { 265 mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE); 266 } 267 268 void smp_send_stop(void) 269 { 270 smp_call_function(stop_this_cpu, 0, 0); 271 } 272 273 void arch_send_call_function_ipi_mask(const struct cpumask *mask) 274 { 275 int cpu; 276 277 for_each_cpu(cpu, mask) 278 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION); 279 } 280 281 void arch_send_call_function_single_ipi(int cpu) 282 { 283 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 284 } 285 286 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 287 void tick_broadcast(const struct cpumask *mask) 288 { 289 int cpu; 290 291 for_each_cpu(cpu, mask) 292 mp_ops->send_ipi(cpu, SMP_MSG_TIMER); 293 } 294 295 static void ipi_timer(void) 296 { 297 irq_enter(); 298 tick_receive_broadcast(); 299 irq_exit(); 300 } 301 #endif 302 303 void smp_message_recv(unsigned int msg) 304 { 305 switch (msg) { 306 case SMP_MSG_FUNCTION: 307 generic_smp_call_function_interrupt(); 308 break; 309 case SMP_MSG_RESCHEDULE: 310 scheduler_ipi(); 311 break; 312 case SMP_MSG_FUNCTION_SINGLE: 313 generic_smp_call_function_single_interrupt(); 314 break; 315 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 316 case SMP_MSG_TIMER: 317 ipi_timer(); 318 break; 319 #endif 320 default: 321 printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n", 322 smp_processor_id(), __func__, msg); 323 break; 324 } 325 } 326 327 /* Not really SMP stuff ... */ 328 int setup_profiling_timer(unsigned int multiplier) 329 { 330 return 0; 331 } 332 333 #ifdef CONFIG_MMU 334 335 static void flush_tlb_all_ipi(void *info) 336 { 337 local_flush_tlb_all(); 338 } 339 340 void flush_tlb_all(void) 341 { 342 on_each_cpu(flush_tlb_all_ipi, 0, 1); 343 } 344 345 static void flush_tlb_mm_ipi(void *mm) 346 { 347 local_flush_tlb_mm((struct mm_struct *)mm); 348 } 349 350 /* 351 * The following tlb flush calls are invoked when old translations are 352 * being torn down, or pte attributes are changing. For single threaded 353 * address spaces, a new context is obtained on the current cpu, and tlb 354 * context on other cpus are invalidated to force a new context allocation 355 * at switch_mm time, should the mm ever be used on other cpus. For 356 * multithreaded address spaces, intercpu interrupts have to be sent. 357 * Another case where intercpu interrupts are required is when the target 358 * mm might be active on another cpu (eg debuggers doing the flushes on 359 * behalf of debugees, kswapd stealing pages from another process etc). 360 * Kanoj 07/00. 361 */ 362 void flush_tlb_mm(struct mm_struct *mm) 363 { 364 preempt_disable(); 365 366 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 367 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); 368 } else { 369 int i; 370 for_each_online_cpu(i) 371 if (smp_processor_id() != i) 372 cpu_context(i, mm) = 0; 373 } 374 local_flush_tlb_mm(mm); 375 376 preempt_enable(); 377 } 378 379 struct flush_tlb_data { 380 struct vm_area_struct *vma; 381 unsigned long addr1; 382 unsigned long addr2; 383 }; 384 385 static void flush_tlb_range_ipi(void *info) 386 { 387 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 388 389 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); 390 } 391 392 void flush_tlb_range(struct vm_area_struct *vma, 393 unsigned long start, unsigned long end) 394 { 395 struct mm_struct *mm = vma->vm_mm; 396 397 preempt_disable(); 398 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 399 struct flush_tlb_data fd; 400 401 fd.vma = vma; 402 fd.addr1 = start; 403 fd.addr2 = end; 404 smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); 405 } else { 406 int i; 407 for_each_online_cpu(i) 408 if (smp_processor_id() != i) 409 cpu_context(i, mm) = 0; 410 } 411 local_flush_tlb_range(vma, start, end); 412 preempt_enable(); 413 } 414 415 static void flush_tlb_kernel_range_ipi(void *info) 416 { 417 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 418 419 local_flush_tlb_kernel_range(fd->addr1, fd->addr2); 420 } 421 422 void flush_tlb_kernel_range(unsigned long start, unsigned long end) 423 { 424 struct flush_tlb_data fd; 425 426 fd.addr1 = start; 427 fd.addr2 = end; 428 on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); 429 } 430 431 static void flush_tlb_page_ipi(void *info) 432 { 433 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 434 435 local_flush_tlb_page(fd->vma, fd->addr1); 436 } 437 438 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 439 { 440 preempt_disable(); 441 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || 442 (current->mm != vma->vm_mm)) { 443 struct flush_tlb_data fd; 444 445 fd.vma = vma; 446 fd.addr1 = page; 447 smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); 448 } else { 449 int i; 450 for_each_online_cpu(i) 451 if (smp_processor_id() != i) 452 cpu_context(i, vma->vm_mm) = 0; 453 } 454 local_flush_tlb_page(vma, page); 455 preempt_enable(); 456 } 457 458 static void flush_tlb_one_ipi(void *info) 459 { 460 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 461 local_flush_tlb_one(fd->addr1, fd->addr2); 462 } 463 464 void flush_tlb_one(unsigned long asid, unsigned long vaddr) 465 { 466 struct flush_tlb_data fd; 467 468 fd.addr1 = asid; 469 fd.addr2 = vaddr; 470 471 smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); 472 local_flush_tlb_one(asid, vaddr); 473 } 474 475 #endif 476