1 /*
2  * Broadcom STB CPU SMP and hotplug support for ARM
3  *
4  * Copyright (C) 2013-2014 Broadcom Corporation
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/errno.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/jiffies.h>
21 #include <linux/of_address.h>
22 #include <linux/of_platform.h>
23 #include <linux/printk.h>
24 #include <linux/regmap.h>
25 #include <linux/smp.h>
26 #include <linux/mfd/syscon.h>
27 
28 #include <asm/cacheflush.h>
29 #include <asm/cp15.h>
30 #include <asm/mach-types.h>
31 #include <asm/smp_plat.h>
32 
/*
 * Bit masks for the per-CPU BPCM power-zone control/status register
 * (manipulated via pwr_ctrl_set/clr/rd below), plus indices of the extra
 * cells in the "syscon-cpu" DT property that carry the register offsets.
 */
enum {
	/* Manual-mode request bits written by software */
	ZONE_MAN_CLKEN_MASK		= BIT(0),
	ZONE_MAN_RESET_CNTL_MASK	= BIT(1),
	ZONE_MAN_MEM_PWR_MASK		= BIT(4),
	ZONE_RESERVED_1_MASK		= BIT(5),
	ZONE_MAN_ISO_CNTL_MASK		= BIT(6),
	ZONE_MANUAL_CONTROL_MASK	= BIT(7),
	ZONE_PWR_DN_REQ_MASK		= BIT(9),
	ZONE_PWR_UP_REQ_MASK		= BIT(10),
	ZONE_BLK_RST_ASSERT_MASK	= BIT(12),
	/* Read-only status bits polled by pwr_ctrl_wait_tmout() */
	ZONE_PWR_OFF_STATE_MASK		= BIT(25),
	ZONE_PWR_ON_STATE_MASK		= BIT(26),
	ZONE_DPG_PWR_STATE_MASK		= BIT(28),
	ZONE_MEM_PWR_STATE_MASK		= BIT(29),
	ZONE_RESET_STATE_MASK		= BIT(31),
	/* Cell indices (after the phandle) in the "syscon-cpu" property */
	CPU0_PWR_ZONE_CTRL_REG		= 1,
	CPU_RESET_CONFIG_REG		= 2,
};
51 
/* Mapped CPU BIU control block; filled in by setup_hifcpubiuctrl_regs() */
static void __iomem *cpubiuctrl_block;
/* Mapped HIF continuation block; filled in by setup_hifcont_regs() */
static void __iomem *hif_cont_block;
/* Offset of CPU0's power-zone control register within cpubiuctrl_block */
static u32 cpu0_pwr_zone_ctrl_reg;
/* Offset of the CPU reset-config register within cpubiuctrl_block */
static u32 cpu_rst_cfg_reg;
/* Offset of the boot-vector registers within hif_cont_block (always 0) */
static u32 hif_cont_reg;
57 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * We must quiesce a dying CPU before it can be killed by the boot CPU. Because
 * one or more caches may be disabled, we must flush to ensure coherency. We
 * cannot use traditional completion structures or spinlocks as they rely on
 * coherency.
 */
static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state);

/* Read @cpu's "alive" flag, invalidating our cached copy first so we
 * observe a value written by a CPU whose caches may be off. */
static int per_cpu_sw_state_rd(u32 cpu)
{
	sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	return per_cpu(per_cpu_sw_state, cpu);
}

/* Publish @val as @cpu's "alive" flag and flush it to memory so that
 * non-coherent observers (e.g. the boot CPU killing us) can see it. */
static void per_cpu_sw_state_wr(u32 cpu, int val)
{
	dmb();
	per_cpu(per_cpu_sw_state, cpu) = val;
	sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
}
#else
static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
#endif
82 
83 static void __iomem *pwr_ctrl_get_base(u32 cpu)
84 {
85 	void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg;
86 	base += (cpu_logical_map(cpu) * 4);
87 	return base;
88 }
89 
90 static u32 pwr_ctrl_rd(u32 cpu)
91 {
92 	void __iomem *base = pwr_ctrl_get_base(cpu);
93 	return readl_relaxed(base);
94 }
95 
96 static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask)
97 {
98 	void __iomem *base = pwr_ctrl_get_base(cpu);
99 	writel((readl(base) & mask) | val, base);
100 }
101 
102 static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask)
103 {
104 	void __iomem *base = pwr_ctrl_get_base(cpu);
105 	writel((readl(base) & mask) & ~val, base);
106 }
107 
#define POLL_TMOUT_MS 500
/*
 * Poll @cpu's power-zone register until the bits selected by @mask match
 * the requested state (@set non-zero => bits present, @set zero => bits
 * clear), or until POLL_TMOUT_MS elapses.
 *
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask)
{
	const unsigned long timeo = jiffies + msecs_to_jiffies(POLL_TMOUT_MS);
	u32 tmp;

	do {
		tmp = pwr_ctrl_rd(cpu) & mask;
		/* !set == !tmp compares the two as booleans */
		if (!set == !tmp)
			return 0;
	} while (time_before(jiffies, timeo));

	/*
	 * Re-check once after the deadline: we may have been preempted
	 * between the last register read and the timeout test, so the
	 * condition could have become true while we were off-CPU.
	 */
	tmp = pwr_ctrl_rd(cpu) & mask;
	if (!set == !tmp)
		return 0;

	return -ETIMEDOUT;
}
126 
127 static void cpu_rst_cfg_set(u32 cpu, int set)
128 {
129 	u32 val;
130 	val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg);
131 	if (set)
132 		val |= BIT(cpu_logical_map(cpu));
133 	else
134 		val &= ~BIT(cpu_logical_map(cpu));
135 	writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg);
136 }
137 
138 static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
139 {
140 	const int reg_ofs = cpu_logical_map(cpu) * 8;
141 	writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs);
142 	writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs);
143 }
144 
/*
 * Kick a powered-up secondary @cpu into the kernel: mark it alive, point
 * its reset vector at secondary_startup, then release it from reset.
 */
static void brcmstb_cpu_boot(u32 cpu)
{
	/* Mark this CPU as "up" */
	per_cpu_sw_state_wr(cpu, 1);

	/*
	 * Set the reset vector to point to the secondary_startup
	 * routine
	 */
	cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup));

	/* Unhalt the cpu */
	cpu_rst_cfg_set(cpu, 0);
}
159 
/*
 * Power @cpu's BPCM zone back on using manual-mode control. The ordering
 * of register writes and the status polls between them follow the
 * hardware power-up sequence: isolate, take manual control, power the
 * memories, enable the clock, then drop isolation and release the zone
 * reset. Failure to reach the expected state within POLL_TMOUT_MS is
 * fatal (panic), since the CPU cannot be brought up.
 */
static void brcmstb_cpu_power_on(u32 cpu)
{
	/*
	 * The secondary cores power was cut, so we must go through
	 * power-on initialization.
	 */
	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00);
	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1);

	pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1);

	/* Wait for the zone memories to report powered */
	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK))
		panic("ZONE_MEM_PWR_STATE_MASK set timeout");

	pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1);

	/* Wait for the dynamic power gate to report powered */
	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK))
		panic("ZONE_DPG_PWR_STATE_MASK set timeout");

	pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
}
183 
184 static int brcmstb_cpu_get_power_state(u32 cpu)
185 {
186 	int tmp = pwr_ctrl_rd(cpu);
187 	return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1;
188 }
189 
190 #ifdef CONFIG_HOTPLUG_CPU
191 
/*
 * Quiesce the dying CPU. Order matters: coherency must be exited and
 * caches flushed BEFORE clearing the sw_state flag, because the flag
 * write must reach memory without relying on cache coherency (see the
 * comment at per_cpu_sw_state). The boot CPU's brcmstb_cpu_kill() spins
 * on this flag before cutting power.
 */
static void brcmstb_cpu_die(u32 cpu)
{
	v7_exit_coherency_flush(all);

	/* Tell the boot CPU it is now safe to kill us */
	per_cpu_sw_state_wr(cpu, 0);

	/* Sit and wait to die */
	wfi();

	/* We should never get here... */
	while (1)
		;
}
205 
/*
 * Called on the boot CPU to cut power to an offlined @cpu. Waits for the
 * victim to clear its sw_state flag (set to 0 at the end of
 * brcmstb_cpu_die()), then runs the manual-mode BPCM power-down sequence
 * — essentially the reverse of brcmstb_cpu_power_on(). Returns 1 on
 * success (the value the SMP core expects for a killed CPU).
 */
static int brcmstb_cpu_kill(u32 cpu)
{
	/*
	 * Ordinarily, the hardware forbids power-down of CPU0 (which is good
	 * because it is the boot CPU), but this is not true when using BPCM
	 * manual mode.  Consequently, we must avoid turning off CPU0 here to
	 * ensure that TI2C master reset will work.
	 */
	if (cpu == 0) {
		pr_warn("SMP: refusing to power off CPU0\n");
		return 1;
	}

	/* Busy-wait until the dying CPU reports it has quiesced */
	while (per_cpu_sw_state_rd(cpu))
		;

	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1);

	/* Wait for the zone memories to report powered off */
	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK))
		panic("ZONE_MEM_PWR_STATE_MASK clear timeout");

	pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1);

	/* Wait for the dynamic power gate to report powered off */
	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK))
		panic("ZONE_DPG_PWR_STATE_MASK clear timeout");

	/* Flush pipeline before resetting CPU */
	mb();

	/* Assert reset on the CPU */
	cpu_rst_cfg_set(cpu, 1);

	return 1;
}
244 
245 #endif /* CONFIG_HOTPLUG_CPU */
246 
247 static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
248 {
249 	int rc = 0;
250 	char *name;
251 	struct device_node *syscon_np = NULL;
252 
253 	name = "syscon-cpu";
254 
255 	syscon_np = of_parse_phandle(np, name, 0);
256 	if (!syscon_np) {
257 		pr_err("can't find phandle %s\n", name);
258 		rc = -EINVAL;
259 		goto cleanup;
260 	}
261 
262 	cpubiuctrl_block = of_iomap(syscon_np, 0);
263 	if (!cpubiuctrl_block) {
264 		pr_err("iomap failed for cpubiuctrl_block\n");
265 		rc = -EINVAL;
266 		goto cleanup;
267 	}
268 
269 	rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG,
270 					&cpu0_pwr_zone_ctrl_reg);
271 	if (rc) {
272 		pr_err("failed to read 1st entry from %s property (%d)\n", name,
273 			rc);
274 		rc = -EINVAL;
275 		goto cleanup;
276 	}
277 
278 	rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG,
279 					&cpu_rst_cfg_reg);
280 	if (rc) {
281 		pr_err("failed to read 2nd entry from %s property (%d)\n", name,
282 			rc);
283 		rc = -EINVAL;
284 		goto cleanup;
285 	}
286 
287 cleanup:
288 	of_node_put(syscon_np);
289 	return rc;
290 }
291 
292 static int __init setup_hifcont_regs(struct device_node *np)
293 {
294 	int rc = 0;
295 	char *name;
296 	struct device_node *syscon_np = NULL;
297 
298 	name = "syscon-cont";
299 
300 	syscon_np = of_parse_phandle(np, name, 0);
301 	if (!syscon_np) {
302 		pr_err("can't find phandle %s\n", name);
303 		rc = -EINVAL;
304 		goto cleanup;
305 	}
306 
307 	hif_cont_block = of_iomap(syscon_np, 0);
308 	if (!hif_cont_block) {
309 		pr_err("iomap failed for hif_cont_block\n");
310 		rc = -EINVAL;
311 		goto cleanup;
312 	}
313 
314 	/* Offset is at top of hif_cont_block */
315 	hif_cont_reg = 0;
316 
317 cleanup:
318 	of_node_put(syscon_np);
319 	return rc;
320 }
321 
322 static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus)
323 {
324 	int rc;
325 	struct device_node *np;
326 	char *name;
327 
328 	name = "brcm,brcmstb-smpboot";
329 	np = of_find_compatible_node(NULL, NULL, name);
330 	if (!np) {
331 		pr_err("can't find compatible node %s\n", name);
332 		return;
333 	}
334 
335 	rc = setup_hifcpubiuctrl_regs(np);
336 	if (rc)
337 		return;
338 
339 	rc = setup_hifcont_regs(np);
340 	if (rc)
341 		return;
342 }
343 
344 static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
345 {
346 	/* Missing the brcm,brcmstb-smpboot DT node? */
347 	if (!cpubiuctrl_block || !hif_cont_block)
348 		return -ENODEV;
349 
350 	/* Bring up power to the core if necessary */
351 	if (brcmstb_cpu_get_power_state(cpu) == 0)
352 		brcmstb_cpu_power_on(cpu);
353 
354 	brcmstb_cpu_boot(cpu);
355 
356 	return 0;
357 }
358 
/* SMP method table, selected via enable-method = "brcm,brahma-b15" */
static const struct smp_operations brcmstb_smp_ops __initconst = {
	.smp_prepare_cpus	= brcmstb_cpu_ctrl_setup,
	.smp_boot_secondary	= brcmstb_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= brcmstb_cpu_kill,
	.cpu_die		= brcmstb_cpu_die,
#endif
};

CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);
369