/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero After we set cpu_start,
	 * the processor will continue on to secondary_start
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
#endif

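/*
 * smp_message_recv() below dispatches an incoming IPI to the matching
 * handler.  As a rough sketch of how it ends up being called
 * (illustrative only: the handler name here is made up, and real
 * platform code such as the mpic driver recovers the message number in
 * its own way), a platform's per-message IPI interrupt handler boils
 * down to:
 *
 *	static irqreturn_t example_ipi_action(int irq, void *data)
 *	{
 *		smp_message_recv((int)(long)data);
 *		return IRQ_HANDLED;
 *	}
 *
 * i.e. it works out which PPC_MSG_* interrupt fired and passes that
 * message number on.
 */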
void smp_message_recv(int msg)
{
	switch(msg) {
	case PPC_MSG_CALL_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
	case PPC_MSG_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(get_irq_regs());
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(get_irq_regs());
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback && smp_ops) {
		mb();
		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
	paca[cpu].__current = p;
	paca[cpu].kstack = (unsigned long) task_thread_info(p)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	current_set[cpu] = task_thread_info(p);
	task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	if (smp_ops)
		max_cpus = smp_ops->probe();
	else
		max_cpus = 1;

	smp_space_timers(max_cpus);

	for_each_possible_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);
	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
	fixup_irqs(cpu_online_map);
#endif
	return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

#ifdef CONFIG_PPC64
	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
#endif
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
	if (smp_ops && smp_ops->cpu_enable)
		return smp_ops->cpu_enable(cpu);

	return -ENOSYS;
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];
	if (!cpu_enable(cpu))
		return 0;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
			msleep(200);
		}
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	const phandle *php;
	phandle ph;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	php = of_get_property(np, "l2-cache", NULL);
	if (php == NULL)
		return NULL;
	ph = *php;
	of_node_put(np);

	return of_find_node_by_phandle(ph);
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	cpu_set(cpu, cpu_online_map);
	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpu_set(cpu, per_cpu(cpu_core_map, base + i));
		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			cpu_set(i, per_cpu(cpu_core_map, cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();
	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin us down to CPU 0 for a short while
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	if (smp_ops)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);

	snapshot_timebases();

	dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
		cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpu_clear(cpu, per_cpu(cpu_core_map, i));
			cpu_clear(i, per_cpu(cpu_core_map, cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif