xref: /openbmc/linux/arch/arm/mach-spear/hotplug.c (revision b96fc2f3)
1a7ed099fSArnd Bergmann /*
2a7ed099fSArnd Bergmann  * linux/arch/arm/mach-spear/hotplug.c
3a7ed099fSArnd Bergmann  *
4a7ed099fSArnd Bergmann  * Copyright (C) 2012 ST Microelectronics Ltd.
5a7ed099fSArnd Bergmann  * Deepak Sikri <deepak.sikri@st.com>
6a7ed099fSArnd Bergmann  *
7a7ed099fSArnd Bergmann  * based upon linux/arch/arm/mach-realview/hotplug.c
8a7ed099fSArnd Bergmann  *
9a7ed099fSArnd Bergmann  * This program is free software; you can redistribute it and/or modify
10a7ed099fSArnd Bergmann  * it under the terms of the GNU General Public License version 2 as
11a7ed099fSArnd Bergmann  * published by the Free Software Foundation.
12a7ed099fSArnd Bergmann  */
13a7ed099fSArnd Bergmann #include <linux/kernel.h>
14a7ed099fSArnd Bergmann #include <linux/errno.h>
15a7ed099fSArnd Bergmann #include <linux/smp.h>
16a7ed099fSArnd Bergmann #include <asm/cp15.h>
17a7ed099fSArnd Bergmann #include <asm/smp_plat.h>
18a7ed099fSArnd Bergmann 
/*
 * Prepare this CPU for power-down: flush it out of the coherency
 * domain so the rest of the cluster can forget about it.
 *
 * The sequence below must stay in exactly this order:
 *   1. invalidate the I-cache (cp15 c7,c5,0 with Rt = 0);
 *   2. dsb to make the maintenance op complete;
 *   3. clear the coherency bit (0x20) in the Auxiliary Control
 *      Register (cp15 c1,c0,1), taking this CPU out of SMP mode;
 *   4. clear CR_C in the System Control Register (cp15 c1,c0,0),
 *      disabling the data cache.
 *
 * NOTE(review): bit 0x20 as the ACTLR "SMP/coherency" bit is
 * implementation-defined — confirm against the TRM of the core used
 * on SPEAr13xx before reusing this elsewhere.
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	asm volatile(
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	dsb\n"
	/*
	 * Turn off coherency
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)			/* scratch register for read-modify-write */
	: "r" (0), "Ir" (CR_C)		/* %1 = 0 for the cache op, %2 = D-cache enable bit */
	: "cc", "memory");
}
39a7ed099fSArnd Bergmann 
/*
 * Undo cpu_enter_lowpower() after a (possibly spurious) wakeup:
 * re-enable the data cache (set CR_C in the System Control Register,
 * cp15 c1,c0,0) and rejoin the coherency domain (set bit 0x20 in the
 * Auxiliary Control Register, cp15 c1,c0,1) — the exact reverse order
 * of the power-down sequence.
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile("mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)		/* scratch register for read-modify-write */
	: "Ir" (CR_C)		/* D-cache enable bit */
	: "cc");
}
54a7ed099fSArnd Bergmann 
55a7ed099fSArnd Bergmann static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
56a7ed099fSArnd Bergmann {
57a7ed099fSArnd Bergmann 	for (;;) {
58a7ed099fSArnd Bergmann 		wfi();
59a7ed099fSArnd Bergmann 
60a7ed099fSArnd Bergmann 		if (pen_release == cpu) {
61a7ed099fSArnd Bergmann 			/*
62a7ed099fSArnd Bergmann 			 * OK, proper wakeup, we're done
63a7ed099fSArnd Bergmann 			 */
64a7ed099fSArnd Bergmann 			break;
65a7ed099fSArnd Bergmann 		}
66a7ed099fSArnd Bergmann 
67a7ed099fSArnd Bergmann 		/*
68a7ed099fSArnd Bergmann 		 * Getting here, means that we have come out of WFI without
69a7ed099fSArnd Bergmann 		 * having been woken up - this shouldn't happen
70a7ed099fSArnd Bergmann 		 *
71a7ed099fSArnd Bergmann 		 * Just note it happening - when we're woken, we can report
72a7ed099fSArnd Bergmann 		 * its occurrence.
73a7ed099fSArnd Bergmann 		 */
74a7ed099fSArnd Bergmann 		(*spurious)++;
75a7ed099fSArnd Bergmann 	}
76a7ed099fSArnd Bergmann }
77a7ed099fSArnd Bergmann 
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void spear13xx_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	spear13xx_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	/*
	 * 'spurious' is a signed int: print it with %d, not %u
	 * (the old %u was a format/argument type mismatch).
	 */
	if (spurious)
		pr_warn("CPU%u: %d spurious wakeup calls\n", cpu, spurious);
}
102