/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/lmb.h>
#include <linux/cpu.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

int sparc64_multi_core __read_mostly;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	ipi_call_lock();
	cpu_set(cpuid, cpu_online_map);
	ipi_call_unlock();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
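/* The go[] array below is the mailbox for that handshake: the slave
 * raises go[MASTER] to request a sample and the master answers by
 * writing its current %tick value into go[SLAVE].  SLAVE is defined
 * so that the two words land in different cache lines (assuming the
 * array itself is cache-line aligned), which keeps the spinning cpus
 * from bouncing a single line back and forth.
 */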

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place.  XXX */
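/* Translate an address inside the kernel image into the real address
 * the hypervisor expects: kern_base plus the offset from KERNBASE.
 */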
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
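/* For every target we dispatch to, the interrupt dispatch status
 * register reports a (busy, nack) bit pair.  On JBUS parts (Jalapeno
 * and Serrano) the pair is indexed by the target's physical ITID; on
 * everything else it is indexed by the dispatch slot we used
 * (nack_busy_id below), which also caps a single dispatch at 32
 * targets and forces the need_more retry path.
 */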
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

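	/* saw_cpu_error is kept as (cpu number + 1) so that zero can
	 * mean "no target was found in the error state"; the error
	 * path below subtracts the one again when printing it.
	 */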
	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

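/* Selected once at boot, in smp_setup_processor_id(), according to the
 * cpu type: spitfire, cheetah/cheetah+, or the sun4v hypervisor.
 */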
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "&cpu_online_map" and "&mm->cpu_vm_mask".
 */
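/* The first mondo data word packs the MMU context in its upper 32 bits
 * and the low 32 bits of the xcall handler's address in its lower half;
 * the remaining two words are passed through untouched as arguments.
 */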
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      &cpumask_of_cpu(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      &cpumask_of_cpu(cpu));
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, &cpumask_of_cpu(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	int this_cpu;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, &cpu_online_map);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	put_cpu();
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm_cpumask(mm));

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}

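/* Bringup handshake: smp_boot_one_cpu() starts the cpu and spins waiting
 * for it to set callin_flag.  Once that happens we add the cpu to
 * smp_commenced_mask, which releases smp_callin() on the new cpu to mark
 * itself online, and then the master synchronizes the new cpu's %tick
 * (except on sun4v, where the tick registers cannot be written).
 */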
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

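	/* Give any interrupts already headed at this cpu a brief window
	 * to be delivered and serviced before it goes offline.
	 */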
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	ipi_call_lock();
	cpu_clear(cpu, cpu_online_map);
	ipi_call_unlock();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      &cpumask_of_cpu(cpu));
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init real_setup_per_cpu_areas(void)
{
	unsigned long paddr, goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
	if (!paddr) {
		prom_printf("Cannot allocate per-cpu memory.\n");
		prom_halt();
	}

	ptr = __va(paddr);
	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}