xref: /openbmc/linux/arch/arm/mach-exynos/platsmp.c (revision 9cfc5c90)
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

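/* Secondary-CPU entry point, implemented in assembly in headsmp.S. */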
extern void exynos4_secondary_startup(void);

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

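	/*
	 * Re-enable the data cache (SCTLR.C) and set the SMP bit
	 * (bit 6, 0x40) in the auxiliary control register so this core
	 * rejoins coherency before returning to the kernel.
	 */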
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

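/*
 * Loop in low power until a proper wakeup arrives: the dying CPU keeps
 * requesting power-down and entering WFI until the boot CPU writes this
 * core's ID back into pen_release.
 */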
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

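/*
 * Location of the "boot register" that holds the secondary startup address:
 * Exynos4210 rev 1.1 uses the PMU INFORM5 register, all other supported SoCs
 * use the sysram area mapped at sysram_base_addr (see cpu_boot_reg() below
 * for the per-SoC offset within it).
 */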
static void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return IOMEM_ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	while (!pmu_raw_readl(S5P_PMU_SPARE2))
		udelay(10);
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

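/* Serialises the pen_release handshake between the boot CPU and a secondary. */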
static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

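/*
 * Publish the physical address a powered-up core should jump to, either
 * through the secure firmware (set_cpu_boot_addr op) or, when no firmware
 * op is available (-ENOSYS), by writing the SoC-specific boot register
 * directly. Returns 0 on success or a negative errno.
 */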
int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
{
	int ret;

	/*
	 * Try to set boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		__raw_writel(boot_addr, boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr)
{
	int ret;

	/*
	 * Try to get boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(get_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		*boot_addr = __raw_readl(boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until the cpu is powered on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (exynos_cpu_power_state(core_id) != S5P_CORE_LOCAL_PWR_EN) {
			pr_err("cpu%u power enable failed\n", cpu);
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			goto fail;

		call_firmware_op(cpu_boot, core_id);

		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

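		/*
		 * The secondary clears pen_release (sets it to -1) in
		 * exynos_secondary_init() once it is executing kernel code.
		 */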
		if (pen_release == -1)
			break;

		udelay(10);
	}

	if (pen_release != -1)
		ret = -ETIMEDOUT;

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU nodes are passed through DT and the possible map
		 * is set up by arm_dt_init_cpu_maps().
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	exynos_sysram_init();

	exynos_set_delayed_reset_assertion(true);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Platform-specific code to shut down a CPU.
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

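/*
 * SMP callbacks for Exynos. These ops are presumably hooked up from the
 * Exynos machine descriptor (a .smp = smp_ops(exynos_smp_ops) assignment
 * in mach-exynos) - noted here as context, not verified in this tree.
 */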
struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};