xref: /openbmc/linux/arch/arm/mach-spear/hotplug.c (revision a85e4c52)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-spear13xx/hotplug.c
 *
 * Copyright (C) 2012 ST Microelectronics Ltd.
 * Deepak Sikri <deepak.sikri@st.com>
 *
 * based upon linux/arch/arm/mach-realview/hotplug.c
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>

#include "generic.h"

static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	asm volatile(
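	/*
	 * Invalidate the I-cache (ICIALLU) and wait for the
	 * invalidation to complete before dropping out of coherency.
	 */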
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	dsb\n"
	/*
	 * Turn off coherency
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
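	/*
	 * Clear the C bit in the System Control Register so the
	 * D-cache is disabled while this CPU is parked.
	 */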
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)
	: "r" (0), "Ir" (CR_C)
	: "cc", "memory");
}

static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

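	/*
	 * Re-enable the D-cache and rejoin coherency, undoing what
	 * cpu_enter_lowpower() did.
	 */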
	asm volatile("mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)
	: "Ir" (CR_C)
	: "cc");
}

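/*
 * Sit in WFI until this CPU is genuinely released again; count any
 * other wakeup as spurious so the caller can report it once it is
 * safe to print.
 */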
static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {
		wfi();

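		/*
		 * A proper wakeup means the CPU bringing us back online
		 * has written our number into spear_pen_release.
		 */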
		if (spear_pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void spear13xx_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	spear13xx_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
101