xref: /openbmc/linux/arch/mips/kernel/smp-cps.c (revision a977d045)
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);
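/*
 * The "nothreads" kernel parameter handled above makes core_vpe_count()
 * below report a single VPE per core, limiting SMP to one CPU per physical
 * core regardless of MT/VP support.
 */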

static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
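/*
 * Note: the PVPE field of GCR_Cx_CONFIG holds the number of VPEs in the
 * core minus one, hence the "+ 1" above (e.g. PVPE == 1 means 2 VPEs).
 * mips_cm_lock_other() points the CM's core-other register block at the
 * target core, so read_gcr_co_config() reads that core's configuration
 * rather than our own.
 */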

static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
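/*
 * After cps_smp_setup() the CPU numbering is a simple linear walk of VPEs
 * across cores (logical CPU number == physical VPE index), kseg0 uses CCA 5
 * (cacheable, coherent, write-back), core 0 is marked powered up and made
 * coherent, and on CM 3.x and later the boot exception vector base points
 * at mips_cps_core_entry so secondary VPs begin execution there.
 */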

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) {
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();
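	/*
	 * The uasm call above emits a single "addiu $s0, $zero, <cca>"
	 * (register 16 is $s0), i.e. it loads the boot CPU's kseg0 CCA into
	 * s0 for the assembly entry code. The dcache blast and bc_wback_inv()
	 * then push the patched instruction out of the L1/L2 caches so that a
	 * core which is not yet coherent still fetches the new code, and
	 * __sync() orders those writes before any core is released from
	 * reset.
	 */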

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
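/*
 * If either allocation above fails, the error path leaves only CPU 0
 * present, so the kernel continues to boot as a uniprocessor system rather
 * than failing outright.
 */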

static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, i.e. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
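/*
 * boot_core() works entirely through the core-other register windows: the
 * target core's reset vector is pointed at mips_cps_core_entry, the core is
 * granted access to the GCRs, and (where a CPC is present) a reset command
 * is issued. The loop then polls the CPC sequencer state until it reports
 * U6, the coherent-execution state; the 100 x 10ms timeout gives the core
 * roughly a second to come up before warnings start being printed.
 */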

static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}
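/*
 * cps_boot_secondary() hands the new CPU its initial register state via the
 * vpe_boot_config (pc = smp_bootstrap, sp/gp = the idle task's stack and
 * thread_info) and then starts it in one of three ways:
 *
 *   - the target core is powered down: boot_core() powers and resets it,
 *     and the core's entry code starts the requested VPE itself;
 *   - the target core is up but is not our core: an online CPU within that
 *     core is asked via smp_call_function_single() to run remote_vpe_boot(),
 *     because mips_cps_boot_vpes() operates on the calling CPU's own core;
 *   - the target VPE shares our core: call mips_cps_boot_vpes() directly.
 */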

static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}
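/*
 * The BUG_ON above catches a mismatch between the kernel's idea of this
 * CPU's VP ID and the GIC's, which would otherwise mean interrupts had been
 * routed to the wrong VP. For Status.IM: with an external interrupt
 * controller (EIC mode) the IM bits are unused and cleared, otherwise
 * interrupt lines IP2-IP7 are unmasked so interrupts routed to them (e.g.
 * by the GIC) can be delivered.
 */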

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
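/*
 * The Compare write above arms the CP0 count/compare timer roughly eight
 * jiffies (8 * mips_hpt_frequency / HZ counter cycles) into the future, so
 * the new CPU receives its first timer interrupt shortly after interrupts
 * are enabled and it enters the idle loop.
 */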

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}
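/*
 * Hotplug policy: CPU 0 can never be taken offline, and offlining any other
 * CPU is only allowed when the CPS power driver can actually power-gate a
 * core. Clearing this VPE's bit in the core's vpe_mask means
 * mips_cps_boot_vpes() will not restart it the next time the core (or a
 * sibling VPE) is brought up.
 */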

static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	core = cpu_data[cpu].core;
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	(void)cpu_report_death();

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
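/*
 * play_dead() runs on the CPU that is going away. If another VPE in the
 * same core remains online it chooses CPU_DEATH_HALT and stops only this
 * TC/VP; if it is the last VPE in the core it chooses CPU_DEATH_POWER and
 * enters the power-gated idle state, taking the whole core down.
 * cpu_report_death() tells the CPU executing cps_cpu_die() (waiting in
 * cpu_wait_death()) that this choice has been made; the closing panic() is
 * reached only if halting or power-gating fails.
 */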

static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}
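/*
 * wait_for_sibling_halt() runs on a CPU within the same core as the dying
 * one, since only such a sibling can reach the dying TC's registers. It
 * repeatedly selects the target TC with settc() and polls TCHalt until the
 * H bit written by play_dead() becomes visible.
 */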

static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (i.e. infinite loop) the CPU is doing
	 * nothing with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat >> CPC_Cx_STAT_CONF_SEQSTATE_SHF))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}
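/*
 * cps_cpu_die() runs on a surviving CPU and waits for the dead CPU's exit
 * to actually complete. In the power-gating case it polls the CPC sequencer
 * until the core reports a powered down or clock gated state (D0, D2 or
 * U2), allowing up to two seconds before warning; clock gating is what
 * happens when a connected JTAG probe keeps the CPC from removing power. In
 * the MT case a sibling CPU in the same core polls the dead TC's TCHalt
 * bit, and in the VP case the CPC VP_RUNNING register is polled until the
 * dead VP's bit clears.
 */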

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
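/*
 * Usage sketch (illustrative only, not part of this file): platform setup
 * code typically tries to register the CPS SMP ops first and falls back to
 * another SMP implementation if no CM or GIC is available. Something along
 * these lines appears in several MIPS platforms' prom_init():
 *
 *	void __init prom_init(void)
 *	{
 *		...
 *		if (!register_cps_smp_ops())
 *			return;
 *		if (!register_vsmp_smp_ops())
 *			return;
 *		register_up_smp_ops();
 *	}
 *
 * The exact fallback chain is platform-specific; the functions named above
 * are just one example of how the return value of register_cps_smp_ops()
 * is used.
 */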
610