/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);

static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

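/*
 * Detect & record the VPE/VP topology reported by the CM, mark each VPE as a
 * possible & present CPU, set a coherent default CCA, and bring core 0 (the
 * boot core) into the coherence domain.
 */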
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

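/*
 * Check whether the boot CCA is suitable for multi-core operation, patch the
 * secondary entry code with the CCA to use, and allocate the per-core &
 * per-VPE boot configuration structures used when bringing up secondaries.
 */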
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

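/*
 * Bring a powered-down core out of reset so that it begins execution from
 * mips_cps_core_entry. The CPC is used to sequence the reset where present,
 * otherwise we fall back to releasing reset via the GCRs.
 */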
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}

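/*
 * Early per-CPU initialisation on a freshly booted secondary: disable MT so
 * only one TC runs per VPE, sanity check our VP ID calculation against what
 * the GIC reports, and unmask hardware interrupt lines IP2-IP7 (or clear the
 * interrupt mask entirely when an external interrupt controller is in use).
 */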
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}

static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	core = cpu_data[cpu].core;
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	(void)cpu_report_death();

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

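/*
 * Called on a surviving CPU to wait for the dying CPU (see play_dead()) to
 * actually go away: either until its core has powered down or clock gated,
 * or until its TC/VP reports that it has halted.
 */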
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup = cps_smp_setup,
	.prepare_cpus = cps_prepare_cpus,
	.boot_secondary = cps_boot_secondary,
	.init_secondary = cps_init_secondary,
	.smp_finish = cps_smp_finish,
	.send_ipi_single = mips_smp_send_ipi_single,
	.send_ipi_mask = mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = cps_cpu_disable,
	.cpu_die = cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}