/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	flush_pmd_entry(pmd);
	outer_clean_range(__pa(pmd), __pa(pmd + 1));

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd = __pmd(0);
	clean_pmd_entry(pmd);
	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
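/*
 * Note on the handshake above: boot_secondary() only kicks the new
 * core; __cpu_up() then gives it up to one second (HZ jiffies) to
 * appear in the online map.  It is the secondary itself that sets
 * cpu_online(), at the end of secondary_start_kernel() below.  The
 * temporary 1:1 section mapping and the pgd are torn down on both
 * success and failure, because by the time the secondary is online
 * it has switched to init_mm's page tables.
 */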
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
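/*
 * Note on the inline assembly in cpu_die() above: it resets the stack
 * pointer to the top of the idle thread's stack before branching back
 * into secondary_start_kernel(), discarding the dead call chain.  The
 * "- 8" offset matches THREAD_START_SP (THREAD_SIZE - 8 on ARM), the
 * same offset __cpu_up() uses when first handing the stack to the
 * secondary via secondary_data.stack.
 */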
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Set up the per-CPU timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}
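/*
 * IPI send path: each message type is a single bit in the target CPU's
 * ipi_data.bits word, so e.g. IPI_CALL_FUNC (value 2) is recorded as
 * bit 2 (0x04).  send_ipi_message() below sets the bits under the
 * per-CPU lock with IRQs disabled, then invokes the platform's
 * smp_cross_call() hook to raise the actual interrupt; the receiving
 * CPU decodes and clears the bits in do_IPI().
 */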
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;
	evt->broadcast	= smp_timer_broadcast;

	clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
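/*
 * Dispatch arithmetic used by do_IPI() below: "msgs & -msgs" isolates
 * the lowest set bit (two's complement), and ffz(~nextmsg) converts
 * that isolated bit back into its index.  Worked example: with
 * IPI_RESCHEDULE and IPI_CALL_FUNC both pending, msgs == 0x06; the
 * first pass takes nextmsg = 0x02 -> index 1 (IPI_RESCHEDULE), the
 * second takes nextmsg = 0x04 -> index 2 (IPI_CALL_FUNC).
 */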
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
		const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
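/*
 * The public flush_tlb_*() entry points below all follow the same
 * pattern: tlb_ops_need_broadcast() reports whether this core fails
 * to broadcast TLB maintenance operations in hardware (older SMP
 * cores such as ARM11MPCore do not broadcast them).  If so, the flush
 * is replicated on the other CPUs via the ipi_flush_tlb_*() helpers
 * above; user-space flushes are limited to CPUs in the mm's
 * mm_cpumask() by on_each_cpu_mask().
 */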
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
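#if 0
/*
 * Illustrative sketch only (not part of the original file): how the
 * cross-call machinery above is typically consumed.  The generic
 * kernel entry point on_each_cpu() ends up in
 * arch_send_call_function_ipi_mask() and thus raises IPI_CALL_FUNC
 * here.  example_count_cpu() and example_usage() are hypothetical
 * names used for this sketch.
 */
static void example_count_cpu(void *info)
{
	atomic_t *counter = info;

	/* Runs on every CPU in interrupt context - must not sleep. */
	atomic_inc(counter);
}

static void example_usage(void)
{
	atomic_t counter = ATOMIC_INIT(0);

	/* Run example_count_cpu() everywhere and wait for completion. */
	on_each_cpu(example_count_cpu, &counter, 1);

	printk(KERN_INFO "ran on %d cpus\n", atomic_read(&counter));
}
#endif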