/*
 * Author: Andy Fleming <afleming@freescale.com>
 * 	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/hotplug.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};
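/*
 * Note on the hand-off protocol (summarized from how the table is used
 * below, not from the ePAPR text itself): a waiting secondary core
 * spins on addr_l, which reads as 1 while the core is still in its
 * spin loop. smp_85xx_start_cpu() publishes the core's PIR and then
 * the physical entry address through addr_h/addr_l to release it.
 */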
#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;

static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	/* Wait for the taker to request the timebase, then freeze it. */
	while (!tb_req)
		barrier();
	tb_req = 0;

	qoriq_pm_ops->freeze_time_base(true);
#ifdef CONFIG_PPC64
	/*
	 * e5500/e6500 have a workaround for erratum A-006958 in place
	 * that will reread the timebase until TBL is non-zero.
	 * That would be a bad thing when the timebase is frozen.
	 *
	 * Thus, we read it manually, and instead of checking that
	 * TBL is non-zero, we ensure that TB does not change.  We don't
	 * do that for the main mftb implementation, because it requires
	 * a scratch register.
	 */
	{
		u64 prev;

		asm volatile("mfspr %0, %1" : "=r" (timebase) :
			     "i" (SPRN_TBRL));

		do {
			prev = timebase;
			asm volatile("mfspr %0, %1" : "=r" (timebase) :
				     "i" (SPRN_TBRL));
		} while (prev != timebase);
	}
#else
	timebase = get_tb();
#endif
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	qoriq_pm_ops->freeze_time_base(false);

	local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

static void smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	hard_irq_disable();
	/* mask all irqs to prevent cpu wakeup */
	qoriq_pm_ops->irq_mask(cpu);

	idle_task_exit();

	mtspr(SPRN_TCR, 0);
	mtspr(SPRN_TSR, mfspr(SPRN_TSR));

	generic_set_cpu_dead(cpu);

	cur_cpu_spec->cpu_down_flush();

	qoriq_pm_ops->cpu_die(cpu);

	while (1)
		;
}

static void qoriq_cpu_kill(unsigned int cpu)
{
	int i;

	for (i = 0; i < 500; i++) {
		if (is_cpu_dead(cpu)) {
#ifdef CONFIG_PPC64
			paca_ptrs[cpu]->cpu_start = 0;
#endif
			return;
		}
		msleep(20);
	}
	pr_err("CPU%d didn't die...\n", cpu);
}
#endif

/*
 * To keep it compatible with old boot programs which use a
 * cache-inhibited spin table, we need to flush the cache before
 * accessing the spin table to invalidate any stale data. We also
 * need to flush the cache after writing to the spin table to push
 * data out.
 */
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

#ifdef CONFIG_PPC64
static void wake_hw_thread(void *info)
{
	void fsl_secondary_thread_init(void);
	unsigned long inia;
	int cpu = *(const int *)info;

	inia = *(unsigned long *)fsl_secondary_thread_init;
	book3e_start_thread(cpu_thread_in_core(cpu), inia);
}
#endif
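/*
 * Release one core from its spin loop. Rough sequence, as implemented
 * below: look up the spin table through the "cpu-release-addr" DT
 * property, map it, reset the core if it is not already spinning, then
 * write the PIR and entry address, flushing the cache around each
 * access in case the table is cache-inhibited.
 */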
static int smp_85xx_start_cpu(int cpu)
{
	int ret = 0;
	struct device_node *np;
	const u64 *cpu_rel_addr;
	unsigned long flags;
	int ioremappable;
	int hw_cpu = get_hard_smp_processor_id(cpu);
	struct epapr_spin_table __iomem *spin_table;

	np = of_get_cpu_node(cpu, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
	if (!cpu_rel_addr) {
		pr_err("No cpu-release-addr for cpu %d\n", cpu);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_prot(*cpu_rel_addr,
			sizeof(struct epapr_spin_table), _PAGE_COHERENT);
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
	hard_irq_disable();

	if (qoriq_pm_ops)
		qoriq_pm_ops->cpu_up_prepare(cpu);

	/* if cpu is not spinning, reset it */
	if (read_spin_table_addr_l(spin_table) != 1) {
		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(cpu);

		/*
		 * wait until core is ready...
		 * We need to invalidate the stale data, in case the boot
		 * loader uses a cache-inhibited spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("timeout waiting for cpu %d to reset\n",
				hw_cpu);
			ret = -EAGAIN;
			goto err;
		}
	}

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
#ifdef CONFIG_PPC64
	out_be64((u64 *)(&spin_table->addr_h),
		 __pa(ppc_function_entry(generic_secondary_smp_init)));
#else
	out_be32(&spin_table->addr_l, __pa(__early_start));
#endif
	flush_spin_table(spin_table);
err:
	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
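/*
 * Kick a CPU into life. The logic below assumes that on dual-threaded
 * cores (threads_per_core == 2, e.g. e6500) a hardware thread is woken
 * from within its own core: either an online sibling runs
 * wake_hw_thread(), or the primary thread is started through the spin
 * table and then brings up the thread recorded in booting_thread_hwid.
 */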
static int smp_85xx_kick_cpu(int nr)
{
	int ret = 0;
#ifdef CONFIG_PPC64
	int primary = nr;
#endif

	WARN_ON(nr < 0 || nr >= num_possible_cpus());

	pr_debug("kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
	if (threads_per_core == 2) {
		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
			return -ENOENT;

		booting_thread_hwid = cpu_thread_in_core(nr);
		primary = cpu_first_thread_sibling(nr);

		if (qoriq_pm_ops)
			qoriq_pm_ops->cpu_up_prepare(nr);

		/*
		 * If either thread in the core is online, use it to start
		 * the other.
		 */
		if (cpu_online(primary)) {
			smp_call_function_single(primary,
					wake_hw_thread, &nr, 1);
			goto done;
		} else if (cpu_online(primary + 1)) {
			smp_call_function_single(primary + 1,
					wake_hw_thread, &nr, 1);
			goto done;
		}

		/*
		 * If we get here, both threads in the core are offline.
		 * So start the primary thread, which will then start the
		 * thread specified in booting_thread_hwid, the one
		 * corresponding to nr.
		 */

	} else if (threads_per_core == 1) {
		/*
		 * If the core has only one thread, set booting_thread_hwid
		 * to an invalid value.
		 */
		booting_thread_hwid = INVALID_THREAD_HWID;

	} else if (threads_per_core > 2) {
		pr_err("More than 2 threads per core is not supported.\n");
		return -EINVAL;
	}

	ret = smp_85xx_start_cpu(primary);
	if (ret)
		return ret;

done:
	paca_ptrs[nr]->cpu_start = 1;
	generic_set_cpu_up(nr);

	return ret;
#else
	ret = smp_85xx_start_cpu(nr);
	if (ret)
		return ret;

	generic_set_cpu_up(nr);

	return ret;
#endif
}

struct smp_ops_t smp_85xx_ops = {
	.cause_nmi_ipi = NULL,
	.kick_cpu = smp_85xx_kick_cpu,
	.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= generic_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
#endif
};
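/*
 * kexec support. On 32-bit, each secondary core flushes its caches
 * and parks in a spin loop until it is reset. On 64-bit, the unused
 * hardware thread of a core is additionally disabled through the
 * TENC/TENSR SPRs before the crash kernel takes over.
 */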
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1)
			;
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}
#else
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	int cpu = smp_processor_id();
	int sibling = cpu_last_thread_sibling(cpu);
	bool notified = false;
	int disable_cpu;
	int disable_threadbit = 0;
	long start = mftb();
	long now;

	local_irq_disable();
	hard_irq_disable();
	mpic_teardown_this_cpu(secondary);

	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
		/*
		 * We enter the crash kernel on whatever cpu crashed,
		 * even if it's a secondary thread. If that's the case,
		 * disable the corresponding primary thread.
		 */
		disable_threadbit = 1;
		disable_cpu = cpu_first_thread_sibling(cpu);
	} else if (sibling != crashing_cpu &&
		   cpu_thread_in_core(cpu) == 0 &&
		   cpu_thread_in_core(sibling) != 0) {
		disable_threadbit = 2;
		disable_cpu = sibling;
	}

	if (disable_threadbit) {
		while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			now = mftb();
			if (!notified && now - start > 1000000) {
				pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
					__func__, smp_processor_id(),
					disable_cpu,
					paca_ptrs[disable_cpu]->kexec_state);
				notified = true;
			}
		}

		if (notified) {
			pr_info("%s: cpu %d done waiting\n",
				__func__, disable_cpu);
		}

		mtspr(SPRN_TENC, disable_threadbit);
		while (mfspr(SPRN_TENSR) & disable_threadbit)
			cpu_relax();
	}
}
#endif

static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
#ifdef CONFIG_PPC32
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
	       (timeout > 0))
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}
#endif

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC_CORE */

static void smp_85xx_setup_cpu(int cpu_nr)
{
	mpic_setup_this_cpu();
}

void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	} else
		smp_85xx_ops.setup_cpu = NULL;

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_global_ipi;
		smp_85xx_ops.probe = NULL;
	}

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_FSL_CORENET_RCPM
	fsl_rcpm_init();
#endif

#ifdef CONFIG_FSL_PMC
	mpc85xx_setup_pmc();
#endif
	if (qoriq_pm_ops) {
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
	}
#endif
	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC_CORE
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}