// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-spear13xx/hotplug.c
 *
 * Copyright (C) 2012 ST Microelectronics Ltd.
 * Deepak Sikri <deepak.sikri@st.com>
 *
 * based upon linux/arch/arm/mach-realview/hotplug.c
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>

#include "generic.h"

static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	asm volatile(
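	/*
	 * Invalidate the entire I-cache and wait for completion (DSB)
	 */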
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	dsb\n"
	/*
	 * Turn off coherency (clear the SMP bit in the auxiliary control
	 * register)
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
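	/*
	 * Disable the D-cache (clear the C bit in the system control
	 * register)
	 */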
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)
	: "r" (0), "Ir" (CR_C)
	: "cc", "memory");
}

static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

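	/*
	 * Re-enable the D-cache and rejoin SMP coherency, undoing
	 * cpu_enter_lowpower()
	 */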
	asm volatile("mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)
	: "Ir" (CR_C)
	: "cc");
}

static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {
		wfi();

		if (spear_pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void spear13xx_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	spear13xx_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
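
/*
 * For reference only - a minimal sketch of how this hook is assumed to be
 * registered, via the .cpu_die callback of the platform's smp_operations
 * (the actual wiring lives in the SpEAr13xx SMP platform code, not in this
 * file):
 *
 *	#ifdef CONFIG_HOTPLUG_CPU
 *		.cpu_die	= spear13xx_cpu_die,
 *	#endif
 */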