/*
 * Author: Andy Fleming <afleming@freescale.com>
 *	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/hotplug.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;
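/*
 * Timebase hand-off used for CPU hotplug: the CPU coming online sets
 * tb_req; a running CPU answers by freezing the timebase through the
 * QorIQ PM driver, sampling it, and publishing the value with tb_valid;
 * the taker loads its own timebase and clears tb_valid, after which the
 * giver unfreezes the timebase.  Both sides spin with hard interrupts
 * disabled so the handshake cannot be interrupted.
 */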
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	while (!tb_req)
		barrier();
	tb_req = 0;

	qoriq_pm_ops->freeze_time_base(true);
#ifdef CONFIG_PPC64
	/*
	 * e5500/e6500 have a workaround for erratum A-006958 in place
	 * that will reread the timebase until TBL is non-zero.
	 * That would be a bad thing when the timebase is frozen.
	 *
	 * Thus, we read it manually, and instead of checking that
	 * TBL is non-zero, we ensure that TB does not change.  We don't
	 * do that for the main mftb implementation, because it requires
	 * a scratch register.
	 */
	{
		u64 prev;

		asm volatile("mfspr %0, %1" : "=r" (timebase) :
			     "i" (SPRN_TBRL));

		do {
			prev = timebase;
			asm volatile("mfspr %0, %1" : "=r" (timebase) :
				     "i" (SPRN_TBRL));
		} while (prev != timebase);
	}
#else
	timebase = get_tb();
#endif
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	qoriq_pm_ops->freeze_time_base(false);

	local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

static void smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	hard_irq_disable();
	/* mask all irqs to prevent cpu wakeup */
	qoriq_pm_ops->irq_mask(cpu);

	idle_task_exit();

	mtspr(SPRN_TCR, 0);
	mtspr(SPRN_TSR, mfspr(SPRN_TSR));

	generic_set_cpu_dead(cpu);

	cur_cpu_spec->cpu_down_flush();

	qoriq_pm_ops->cpu_die(cpu);

	while (1)
		;
}

static void qoriq_cpu_kill(unsigned int cpu)
{
	int i;

	/* Poll for up to 10 seconds (500 * 20ms) for the CPU to die. */
	for (i = 0; i < 500; i++) {
		if (is_cpu_dead(cpu)) {
#ifdef CONFIG_PPC64
			paca[cpu].cpu_start = 0;
#endif
			return;
		}
		msleep(20);
	}
	pr_err("CPU%d didn't die...\n", cpu);
}
#endif

/*
 * To stay compatible with an old boot program that uses a
 * cache-inhibited spin table, we need to flush the cache before
 * reading the spin table, to invalidate any stale data.  We also
 * need to flush the cache after writing to the spin table, to push
 * the data out.
 */
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}
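/*
 * On 64-bit (e6500), a sleeping hardware thread is started by a thread
 * that is already running on the same core, not via the spin table.
 * wake_hw_thread() reads the entry address out of the
 * fsl_secondary_thread_init function descriptor (on ELFv1, the first
 * word of the descriptor is the entry point) and hands it to
 * book3e_start_thread() for the target thread.
 */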
#ifdef CONFIG_PPC64
static void wake_hw_thread(void *info)
{
	void fsl_secondary_thread_init(void);
	unsigned long inia;
	int cpu = *(const int *)info;

	inia = *(unsigned long *)fsl_secondary_thread_init;
	book3e_start_thread(cpu_thread_in_core(cpu), inia);
}
#endif

static int smp_85xx_start_cpu(int cpu)
{
	int ret = 0;
	struct device_node *np;
	const u64 *cpu_rel_addr;
	unsigned long flags;
	int ioremappable;
	int hw_cpu = get_hard_smp_processor_id(cpu);
	struct epapr_spin_table __iomem *spin_table;

	np = of_get_cpu_node(cpu, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
	if (!cpu_rel_addr) {
		pr_err("No cpu-release-addr for cpu %d\n", cpu);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_prot(*cpu_rel_addr,
			sizeof(struct epapr_spin_table), _PAGE_COHERENT);
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
	hard_irq_disable();

	if (qoriq_pm_ops)
		qoriq_pm_ops->cpu_up_prepare(cpu);

	/* if cpu is not spinning, reset it */
	if (read_spin_table_addr_l(spin_table) != 1) {
		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(cpu);

		/*
		 * wait until core is ready...
		 * We need to invalidate the stale data, in case the boot
		 * loader uses a cache-inhibited spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("timeout waiting for cpu %d to reset\n",
				hw_cpu);
			ret = -EAGAIN;
			goto err;
		}
	}

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
#ifdef CONFIG_PPC64
	out_be64((u64 *)(&spin_table->addr_h),
		__pa(ppc_function_entry(generic_secondary_smp_init)));
#else
	out_be32(&spin_table->addr_l, __pa(__early_start));
#endif
	flush_spin_table(spin_table);
err:
	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
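/*
 * Kick a CPU out of its spin loop.  With two threads per core (e6500),
 * an online sibling thread starts the target directly via
 * wake_hw_thread(); if the whole core is offline, only the primary
 * thread goes through the spin table, and it then starts the thread
 * recorded in booting_thread_hwid.
 */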
static int smp_85xx_kick_cpu(int nr)
{
	int ret = 0;
#ifdef CONFIG_PPC64
	int primary = nr;
#endif

	WARN_ON(nr < 0 || nr >= num_possible_cpus());

	pr_debug("kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
	if (threads_per_core == 2) {
		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
			return -ENOENT;

		booting_thread_hwid = cpu_thread_in_core(nr);
		primary = cpu_first_thread_sibling(nr);

		if (qoriq_pm_ops)
			qoriq_pm_ops->cpu_up_prepare(nr);

		/*
		 * If either thread in the core is online, use it to start
		 * the other.
		 */
		if (cpu_online(primary)) {
			smp_call_function_single(primary,
					wake_hw_thread, &nr, 1);
			goto done;
		} else if (cpu_online(primary + 1)) {
			smp_call_function_single(primary + 1,
					wake_hw_thread, &nr, 1);
			goto done;
		}

		/*
		 * If we get here, both threads in the core are offline.
		 * Start the primary thread, which will then start the
		 * thread specified in booting_thread_hwid, the one
		 * corresponding to nr.
		 */

	} else if (threads_per_core == 1) {
		/*
		 * If each core has only one thread, set booting_thread_hwid
		 * to an invalid value.
		 */
		booting_thread_hwid = INVALID_THREAD_HWID;

	} else if (threads_per_core > 2) {
		pr_err("Cores with more than 2 threads are not supported.\n");
		return -EINVAL;
	}

	ret = smp_85xx_start_cpu(primary);
	if (ret)
		return ret;

done:
	paca[nr].cpu_start = 1;
	generic_set_cpu_up(nr);

	return ret;
#else
	ret = smp_85xx_start_cpu(nr);
	if (ret)
		return ret;

	generic_set_cpu_up(nr);

	return ret;
#endif
}

struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
	.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = generic_cpu_disable,
	.cpu_die = generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
	.give_timebase = smp_generic_give_timebase,
	.take_timebase = smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1)
			;
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}
#else
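/*
 * 64-bit e6500 variant: threads share a core, so before entering the
 * crash kernel the sibling thread we will not use has to be shut off.
 * We wait for it to reach real mode, turn it off by writing its bit to
 * the Thread Enable Clear (TENC) register, and poll TENSR to confirm
 * the disable took effect.
 */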
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	int cpu = smp_processor_id();
	int sibling = cpu_last_thread_sibling(cpu);
	bool notified = false;
	int disable_cpu;
	int disable_threadbit = 0;
	long start = mftb();
	long now;

	local_irq_disable();
	hard_irq_disable();
	mpic_teardown_this_cpu(secondary);

	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
		/*
		 * We enter the crash kernel on whatever cpu crashed,
		 * even if it's a secondary thread.  If that's the case,
		 * disable the corresponding primary thread.
		 */
		disable_threadbit = 1;
		disable_cpu = cpu_first_thread_sibling(cpu);
	} else if (sibling != crashing_cpu &&
			cpu_thread_in_core(cpu) == 0 &&
			cpu_thread_in_core(sibling) != 0) {
		disable_threadbit = 2;
		disable_cpu = sibling;
	}

	if (disable_threadbit) {
		while (paca[disable_cpu].kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			now = mftb();
			if (!notified && now - start > 1000000) {
				pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
					__func__, smp_processor_id(),
					disable_cpu,
					paca[disable_cpu].kexec_state);
				notified = true;
			}
		}

		if (notified) {
			pr_info("%s: cpu %d done waiting\n",
				__func__, disable_cpu);
		}

		mtspr(SPRN_TENC, disable_threadbit);
		while (mfspr(SPRN_TENSR) & disable_threadbit)
			cpu_relax();
	}
}
#endif

static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
#ifdef CONFIG_PPC32
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	/* Spin until all other cpus have parked themselves. */
	while (atomic_read(&kexec_down_cpus) != (num_cpus - 1) &&
			timeout > 0)
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}
#endif

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC_CORE */

static void smp_85xx_basic_setup(int cpu_nr)
{
	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
}

static void smp_85xx_setup_cpu(int cpu_nr)
{
	mpic_setup_this_cpu();
	smp_85xx_basic_setup(cpu_nr);
}

void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	} else
		smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
		smp_85xx_ops.probe = NULL;
	}

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_FSL_CORENET_RCPM
	fsl_rcpm_init();
#endif

#ifdef CONFIG_FSL_PMC
	mpc85xx_setup_pmc();
#endif
	if (qoriq_pm_ops) {
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
	}
#endif
	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC_CORE
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}