// SPDX-License-Identifier: GPL-2.0
/*
 * SMP support for SoCs with APMU
 *
 * Copyright (C) 2014  Renesas Electronics Corporation
 * Copyright (C) 2013  Magnus Damm
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/threads.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include "common.h"
#include "rcar-gen2.h"

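/*
 * Per-CPU bookkeeping: the ioremapped base of the APMU instance that
 * controls each logical CPU, plus the CPU's bit position within that
 * APMU's registers.  Filled in from DT by apmu_parse_dt()/apmu_init_cpu().
 */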
static struct {
	void __iomem *iomem;
	int bit;
} apmu_cpus[NR_CPUS];

#define WUPCR_OFFS	 0x10		/* Wake Up Control Register */
#define PSTR_OFFS	 0x40		/* Power Status Register */
#define CPUNCR_OFFS(n)	(0x100 + (0x10 * (n)))
					/* CPUn Power Status Control Register */
#define DBGRCR_OFFS	0x180		/* Debug Resource Reset Control Reg. */

/* Power Status Register */
#define CPUNST(r, n)	(((r) >> ((n) * 4)) & 3)	/* CPUn Status Bit */
#define CPUST_RUN	0		/* Run Mode */
#define CPUST_STANDBY	3		/* CoreStandby Mode */

/* Debug Resource Reset Control Register */
#define DBGCPUREN	BIT(24)		/* CPU Other Reset Request Enable */
#define DBGCPUNREN(n)	BIT((n) + 20)	/* CPUn Reset Request Enable */
#define DBGCPUPREN	BIT(19)		/* CPU Peripheral Reset Req. Enable */

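/*
 * Power a core up by setting its bit in the Wake Up Control Register,
 * then spin until the APMU clears WUPCR to signal completion.
 */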
static int __maybe_unused apmu_power_on(void __iomem *p, int bit)
{
	/* request power on */
	writel_relaxed(BIT(bit), p + WUPCR_OFFS);

	/* wait for APMU to finish */
	while (readl_relaxed(p + WUPCR_OFFS) != 0)
		;

	return 0;
}

static int __maybe_unused apmu_power_off(void __iomem *p, int bit)
{
	/* request Core Standby for next WFI */
	writel_relaxed(3, p + CPUNCR_OFFS(bit));
	return 0;
}

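/*
 * Poll the Power Status Register for up to ~1 second (1000 x 1 ms);
 * return 1 once the core reports CoreStandby mode, 0 on timeout.
 */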
static int __maybe_unused apmu_power_off_poll(void __iomem *p, int bit)
{
	int k;

	for (k = 0; k < 1000; k++) {
		if (CPUNST(readl_relaxed(p + PSTR_OFFS), bit) == CPUST_STANDBY)
			return 1;

		mdelay(1);
	}

	return 0;
}

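/*
 * Look up the APMU instance assigned to @cpu and call @fn with its base
 * address and bit position; fail with -EINVAL if no APMU was registered.
 */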
static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int bit))
{
	void __iomem *p = apmu_cpus[cpu].iomem;

	return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_SUSPEND)
/* nicked from arch/arm/mach-exynos/hotplug.c */
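/*
 * Disable the D-cache via SCTLR, flush it to the Level of Unification
 * Inner Shareable, then leave SMP coherency by clearing ACTLR.SMP
 * (bit 6) so the core can be powered down safely.
 */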
static inline void cpu_enter_lowpower_a15(void)
{
	unsigned int v;

	asm volatile(
	"       mrc     p15, 0, %0, c1, c0, 0\n"
	"       bic     %0, %0, %1\n"
	"       mcr     p15, 0, %0, c1, c0, 0\n"
		: "=&r" (v)
		: "Ir" (CR_C)
		: "cc");

	flush_cache_louis();

	asm volatile(
	/*
	 * Turn off coherency
	 */
	"       mrc     p15, 0, %0, c1, c0, 1\n"
	"       bic     %0, %0, %1\n"
	"       mcr     p15, 0, %0, c1, c0, 1\n"
		: "=&r" (v)
		: "Ir" (0x40)
		: "cc");

	isb();
	dsb();
}

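/*
 * Arm CoreStandby in the APMU and take the core out of coherency; the
 * caller is expected to execute WFI afterwards to actually power down.
 */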
static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu)
{
	/* Select next sleep mode using the APMU */
	apmu_wrap(cpu, apmu_power_off);

	/* Do ARM specific CPU shutdown */
	cpu_enter_lowpower_a15();
}
#endif

#if defined(CONFIG_HOTPLUG_CPU)
static void shmobile_smp_apmu_cpu_die(unsigned int cpu)
{
	/* For this particular CPU deregister boot vector */
	shmobile_smp_hook(cpu, 0, 0);

	/* Shutdown CPU core */
	shmobile_smp_apmu_cpu_shutdown(cpu);

	/* jump to shared mach-shmobile sleep / reset code */
	shmobile_smp_sleep();
}

static int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
{
	return apmu_wrap(cpu, apmu_power_off_poll);
}
#endif

#if defined(CONFIG_SUSPEND)
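/*
 * cpu_suspend() finisher: register cpu_resume() as the wakeup vector,
 * arm CoreStandby and enter WFI.  If we ever return here the core never
 * lost power, so report failure (non-zero) back to cpu_suspend().
 */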
static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
{
	shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
	shmobile_smp_apmu_cpu_shutdown(cpu);
	cpu_do_idle(); /* WFI selects Core Standby */
	return 1;
}

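/*
 * Undo cpu_enter_lowpower_a15(): re-enable the D-cache (SCTLR.C) and
 * rejoin SMP coherency (ACTLR.SMP) after waking up again.
 */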
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile("mrc    p15, 0, %0, c1, c0, 0\n"
		     "       orr     %0, %0, %1\n"
		     "       mcr     p15, 0, %0, c1, c0, 0\n"
		     "       mrc     p15, 0, %0, c1, c0, 1\n"
		     "       orr     %0, %0, %2\n"
		     "       mcr     p15, 0, %0, c1, c0, 1\n"
		     : "=&r" (v)
		     : "Ir" (CR_C), "Ir" (0x40)
		     : "cc");
}

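/*
 * System suspend entry point installed in shmobile_suspend_ops: suspend
 * the calling CPU through the APMU and restore cache/coherency state
 * once it resumes.
 */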
static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
{
	cpu_suspend(smp_processor_id(), shmobile_smp_apmu_do_suspend);
	cpu_leave_lowpower();
	return 0;
}

void __init shmobile_smp_apmu_suspend_init(void)
{
	shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
}
#endif

#ifdef CONFIG_SMP
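/*
 * Record the APMU base address and bit position for a logical CPU and
 * enable the CPU and peripheral debug reset requests in DBGRCR.
 */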
static void apmu_init_cpu(struct resource *res, int cpu, int bit)
{
	u32 x;

	if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem)
		return;

	apmu_cpus[cpu].iomem = ioremap(res->start, resource_size(res));
	if (!apmu_cpus[cpu].iomem)
		return;

	apmu_cpus[cpu].bit = bit;

	pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res);

	/* Setup for debug mode */
	x = readl(apmu_cpus[cpu].iomem + DBGRCR_OFFS);
	x |= DBGCPUREN | DBGCPUNREN(bit) | DBGCPUPREN;
	writel(x, apmu_cpus[cpu].iomem + DBGRCR_OFFS);
}

static const struct of_device_id apmu_ids[] = {
	{ .compatible = "renesas,apmu" },
	{ /*sentinel*/ }
};

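/*
 * Walk all "renesas,apmu" nodes in DT.  Only the APMU whose "cpus"
 * phandle list contains the boot CPU is used (other clusters are
 * skipped); each CPU listed there is then handed to @fn together with
 * the APMU resource and the CPU's bit position.
 */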
static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
{
	struct device_node *np_apmu, *np_cpu;
	struct resource res;
	int bit, index;
	u32 id;

	for_each_matching_node(np_apmu, apmu_ids) {
		/* only enable the cluster that includes the boot CPU */
		bool is_allowed = false;

		for (bit = 0; bit < CONFIG_NR_CPUS; bit++) {
			np_cpu = of_parse_phandle(np_apmu, "cpus", bit);
			if (np_cpu) {
				if (!of_property_read_u32(np_cpu, "reg", &id)) {
					if (id == cpu_logical_map(0)) {
						is_allowed = true;
						of_node_put(np_cpu);
						break;
					}
				}
				of_node_put(np_cpu);
			}
		}
		if (!is_allowed)
			continue;

		for (bit = 0; bit < CONFIG_NR_CPUS; bit++) {
			np_cpu = of_parse_phandle(np_apmu, "cpus", bit);
			if (np_cpu) {
				if (!of_property_read_u32(np_cpu, "reg", &id)) {
					index = get_logical_index(id);
					if ((index >= 0) &&
					    !of_address_to_resource(np_apmu,
								    0, &res))
						fn(&res, index, bit);
				}
				of_node_put(np_cpu);
			}
		}
	}
}

static void __init shmobile_smp_apmu_setup_boot(void)
{
	/* install boot code shared by all CPUs */
	shmobile_boot_fn = __pa_symbol(shmobile_smp_boot);
	shmobile_boot_fn_gen2 = shmobile_boot_fn;
}

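/*
 * Point the secondary's boot vector at shmobile_boot_apmu and power the
 * core up through its APMU.
 */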
static int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
					    struct task_struct *idle)
{
	/* For this particular CPU register boot vector */
	shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_apmu), 0);

	return apmu_wrap(cpu, apmu_power_on);
}

static void __init shmobile_smp_apmu_prepare_cpus_dt(unsigned int max_cpus)
{
	shmobile_smp_apmu_setup_boot();
	apmu_parse_dt(apmu_init_cpu);
	rcar_gen2_pm_init();
}

static struct smp_operations apmu_smp_ops __initdata = {
	.smp_prepare_cpus	= shmobile_smp_apmu_prepare_cpus_dt,
	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
	.cpu_die		= shmobile_smp_apmu_cpu_die,
	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
#endif
};

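/* Selected when a CPU node's DT enable-method is "renesas,apmu" */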
CPU_METHOD_OF_DECLARE(shmobile_smp_apmu, "renesas,apmu", &apmu_smp_ops);
#endif /* CONFIG_SMP */