xref: /openbmc/linux/arch/arm/mach-spear/hotplug.c (revision a7ed099ffc8edf2a6dccd8a22469347f5cdcfa57)
1*a7ed099fSArnd Bergmann /*
2*a7ed099fSArnd Bergmann  * linux/arch/arm/mach-spear13xx/hotplug.c
3*a7ed099fSArnd Bergmann  *
4*a7ed099fSArnd Bergmann  * Copyright (C) 2012 ST Microelectronics Ltd.
5*a7ed099fSArnd Bergmann  * Deepak Sikri <deepak.sikri@st.com>
6*a7ed099fSArnd Bergmann  *
7*a7ed099fSArnd Bergmann  * based upon linux/arch/arm/mach-realview/hotplug.c
8*a7ed099fSArnd Bergmann  *
9*a7ed099fSArnd Bergmann  * This program is free software; you can redistribute it and/or modify
10*a7ed099fSArnd Bergmann  * it under the terms of the GNU General Public License version 2 as
11*a7ed099fSArnd Bergmann  * published by the Free Software Foundation.
12*a7ed099fSArnd Bergmann  */
13*a7ed099fSArnd Bergmann #include <linux/kernel.h>
14*a7ed099fSArnd Bergmann #include <linux/errno.h>
15*a7ed099fSArnd Bergmann #include <linux/smp.h>
16*a7ed099fSArnd Bergmann #include <asm/cacheflush.h>
17*a7ed099fSArnd Bergmann #include <asm/cp15.h>
18*a7ed099fSArnd Bergmann #include <asm/smp_plat.h>
19*a7ed099fSArnd Bergmann 
/*
 * Prepare this CPU for power-down: flush its caches, invalidate the
 * I-cache, and take it out of SMP cache coherency so the other CPUs
 * stop snooping it while it sits in WFI.
 *
 * Must run on the dying CPU with IRQs disabled (see spear13xx_cpu_die).
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	/* Push all dirty lines to memory before coherency is dropped. */
	flush_cache_all();
	asm volatile(
	/* ICIALLU: invalidate the entire instruction cache (%1 is 0). */
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	dsb\n"
	/*
	 * Turn off coherency
	 */
	/*
	 * Clear bit 5 of the auxiliary control register, then clear
	 * CR_C (D-cache enable) in the system control register.
	 * NOTE(review): on Cortex-A9 the SMP bit is documented as
	 * ACTLR[6] (0x40), not bit 5 — confirm which bit this SoC's
	 * core actually uses for coherency.
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)
	: "r" (0), "Ir" (CR_C)
	: "cc", "memory");
}
41*a7ed099fSArnd Bergmann 
/*
 * Undo cpu_enter_lowpower(): re-enable the D-cache (CR_C in the
 * system control register) and set bit 5 of the auxiliary control
 * register again, restoring the state cpu_enter_lowpower() cleared.
 * Called on the dying CPU after a (possibly spurious) WFI wakeup.
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile("mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)
	: "Ir" (CR_C)
	: "cc");
}
56*a7ed099fSArnd Bergmann 
/*
 * Park the dying CPU in WFI until the boot code releases it.
 *
 * @cpu:      logical CPU number of the caller (the dying CPU)
 * @spurious: out-parameter, incremented once per wakeup from WFI
 *            that was not a genuine release
 *
 * Returns (breaks out of the loop) only when pen_release equals
 * this CPU's number, i.e. when the SMP boot path has elected to
 * bring this CPU back online.
 */
static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {
		wfi();

		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
79*a7ed099fSArnd Bergmann 
80*a7ed099fSArnd Bergmann /*
81*a7ed099fSArnd Bergmann  * platform-specific code to shutdown a CPU
82*a7ed099fSArnd Bergmann  *
83*a7ed099fSArnd Bergmann  * Called with IRQs disabled
84*a7ed099fSArnd Bergmann  */
85*a7ed099fSArnd Bergmann void __ref spear13xx_cpu_die(unsigned int cpu)
86*a7ed099fSArnd Bergmann {
87*a7ed099fSArnd Bergmann 	int spurious = 0;
88*a7ed099fSArnd Bergmann 
89*a7ed099fSArnd Bergmann 	/*
90*a7ed099fSArnd Bergmann 	 * we're ready for shutdown now, so do it
91*a7ed099fSArnd Bergmann 	 */
92*a7ed099fSArnd Bergmann 	cpu_enter_lowpower();
93*a7ed099fSArnd Bergmann 	spear13xx_do_lowpower(cpu, &spurious);
94*a7ed099fSArnd Bergmann 
95*a7ed099fSArnd Bergmann 	/*
96*a7ed099fSArnd Bergmann 	 * bring this CPU back into the world of cache
97*a7ed099fSArnd Bergmann 	 * coherency, and then restore interrupts
98*a7ed099fSArnd Bergmann 	 */
99*a7ed099fSArnd Bergmann 	cpu_leave_lowpower();
100*a7ed099fSArnd Bergmann 
101*a7ed099fSArnd Bergmann 	if (spurious)
102*a7ed099fSArnd Bergmann 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
103*a7ed099fSArnd Bergmann }
104