/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/gic.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		return 1;

	/* Point the CM's "other" register block at the target core */
	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);

	/* PVPE is the index of the last VPE, so the count is PVPE + 1 */
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("VPE topology ");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);
}
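
/*
 * Illustrative sketch (not built): accessing another core's GCRs always
 * follows the same two-step pattern used by core_vpe_count() above -
 * point the CM's "other" register block at the target core, then use
 * the GCR_CO_* alias accessors. The helper below shows the pattern for
 * reading a target core's coherence control register; it assumes the
 * read_gcr_co_coherence() accessor that asm/mips-cm.h generates
 * alongside the write accessor used by boot_core() below.
 */
#if 0
static u32 example_read_core_coherence(unsigned core)
{
	/* Redirect the CM "other" region to the target core */
	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);

	/* GCR_CO_* accessors now operate on that core's registers */
	return read_gcr_co_coherence();
}
#endif
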
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * v1 = CM base address
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
	uasm_i_addiu(&entry_code, 16, 0, cca);

	/* Flush the patched code to memory so a new core fetches it */
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n",
		       ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
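
/*
 * For reference, a sketch of what the two uasm calls in
 * cps_prepare_cpus() emit at the start of mips_cps_core_entry on a
 * 32-bit kernel (the exact instruction selection is up to uasm's
 * UASM_i_LA expansion and may differ):
 *
 *	lui	v1, <CM base bits 31:16>   # UASM_i_LA(&entry_code, 3, ...)
 *	ori	v1, v1, <CM base bits 15:0>
 *	addiu	s0, zero, <cca>            # uasm_i_addiu(&entry_code, 16, 0, cca)
 *
 * so a freshly reset core enters mips_cps_core_entry with the CM base
 * address in v1 ($3) and the desired CCA in s0 ($16).
 */
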
static void boot_core(unsigned core)
{
	u32 access;

	/* Select the appropriate core */
	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);
		write_cpc_co_cmd(CPC_Cx_CMD_RESET);
		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	mips_cps_boot_vpes();
}

static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	/* Hand the new CPU its initial PC, stack & gp */
	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << vpe_id, &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core);
		goto out;
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		BUG_ON(remote >= NR_CPUS);

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes();
out:
	preempt_enable();
}

static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
				 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	/* Arm the first timer interrupt 8 jiffies worth of cycles out */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
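
/*
 * A worked example for the write_c0_compare() in cps_smp_finish(),
 * using hypothetical numbers: with a 100MHz CP0 counter
 * (mips_hpt_frequency == 100000000) and HZ == 100, the new CPU's first
 * timer interrupt is scheduled 8 * 100000000 / 100 = 8000000 counter
 * cycles ahead - 80ms, or 8 jiffies - so a timer tick arrives soon
 * after the CPU comes online.
 */
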
#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	/* CPU 0 must remain online */
	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);

	return 0;
}

static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
	unsigned cpu, core;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	if (cpu_has_mipsmt) {
		core = cpu_data[cpu].core;

		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		/* Halt this TC */
		write_c0_tchalt(TCHALT_H);
		instruction_hazard();
	} else {
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	/* Spin until the sibling TC reports itself halted */
	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing
	 * nothing with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers
		 * wait for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= gic_send_ipi_single,
	.send_ipi_mask		= gic_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
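
/*
 * Usage sketch: a platform registers CPS SMP early in boot and falls
 * back to another SMP implementation when register_cps_smp_ops()
 * returns -ENODEV (no CM or no GIC present). The hook name below is
 * hypothetical; Malta's init code does the equivalent.
 */
#if 0
void __init example_plat_smp_init(void)	/* hypothetical platform hook */
{
	/* Prefer CPS SMP; returns 0 on success */
	if (!register_cps_smp_ops())
		return;

	/* Fall back to e.g. MT (VSMP) SMP ops on hardware without a CM */
	register_vsmp_smp_ops();
}
#endif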