/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_idmap(pgd);
	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;
}
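
/*
 * Illustrative sketch only (hypothetical "foo" platform, not part of this
 * file's logic): a platform with its own boot/hotplug protocol would
 * typically fill in a struct smp_operations and register it early via
 * smp_set_ops(), e.g.:
 *
 *	static struct smp_operations foo_smp_ops = {
 *		.smp_init_cpus		= foo_smp_init_cpus,
 *		.smp_prepare_cpus	= foo_smp_prepare_cpus,
 *		.smp_secondary_init	= foo_secondary_init,
 *		.smp_boot_secondary	= foo_boot_secondary,
 *	#ifdef CONFIG_HOTPLUG_CPU
 *		.cpu_kill		= foo_cpu_kill,
 *		.cpu_die		= foo_cpu_die,
 *	#endif
 *	};
 *
 *	smp_set_ops(&foo_smp_ops);
 *
 * Only hooks that this file actually consults are shown; each is checked
 * for NULL before use (__cpu_up() fails with -ENOSYS if smp_boot_secondary
 * is missing).
 */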
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
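
/*
 * In outline, taking a CPU down is a handshake between the CPU requesting
 * the offline and the CPU going away:
 *
 *  - __cpu_disable() (above) runs on the dying CPU: it marks the CPU
 *    offline, migrates its IRQs and flushes its caches and TLB.
 *  - __cpu_die() (below) runs on the requesting thread: it waits up to
 *    five seconds for the cpu_died completion, then calls
 *    platform_cpu_kill() to cut power and/or clocks.
 *  - cpu_die() (below) runs from the dying CPU's idle thread: it flushes
 *    the L1 cache, completes cpu_died and finally calls smp_ops.cpu_die(),
 *    which is normally expected not to return.
 */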
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time.  A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
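
/*
 * IPI_COMPLETION support: a caller registers a struct completion against
 * the CPU that will receive the IPI; when that CPU handles IPI_COMPLETION,
 * ipi_complete() completes it, waking up whoever is waiting on it.
 */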
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
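
/*
 * Stop all other CPUs: send IPI_CPU_STOP to every other online CPU and
 * give them up to one second to take themselves offline in ipi_cpu_stop().
 * Typically used on the halt/reboot/panic paths.
 */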
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
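
/*
 * With CONFIG_CPU_FREQ, loops_per_jiffy must follow CPU frequency changes
 * so that udelay() stays roughly calibrated.  The notifier below records a
 * reference lpj/frequency pair per CPU (and globally) the first time it
 * runs, and then rescales lpj to ref_lpj * new_freq / ref_freq before a
 * frequency increase and after a decrease, unless the driver sets
 * CPUFREQ_CONST_LOOPS.
 */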
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif