/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			core = cpu_data[cpu].core;

			if (core == current_cpu_data.core)
				continue;

			/*
			 * The target core may still be powered down; keep
			 * issuing a CPC power-up command until it becomes
			 * coherent, so the IPI is not lost.
			 */
			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other(core, 0);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
			     struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an
	 * irq node but didn't find an ipidomain, search for one that is
	 * not in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which only use IPI domains some of the time,
	 * depending upon configuration we don't know until runtime. An
	 * example is Malta where we may compile in support for GIC & the
	 * MT ASE, but run on a system which has multiple VPEs in a single
	 * core and doesn't include a GIC. Until all IPI implementations
	 * have been converted to use IPI domains the best we can do here
	 * is to return & hope some other code sets up the IPIs.
	 */
	if (!ipidomain)
		return 0;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an
	 * irq node but didn't find an ipidomain, search for one that is
	 * not in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


static int __init mips_smp_ipi_init(void)
{
	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	cpumask_set_cpu(cpu, &cpu_callin_map);
	synchronise_count_slave(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * irqs will be enabled in ->smp_finish(); enabling them too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callin_map);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
		udelay(100);
		schedule();
	}

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process, etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = !exec;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 1;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);

}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */