xref: /openbmc/linux/arch/arm/mach-versatile/hotplug.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This hotplug implementation is _specific_ to the situation found on
 * ARM development platforms where there is _no_ possibility of actually
 * taking a CPU offline, resetting it, or otherwise.  Real platforms must
 * NOT copy this code.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/cp15.h>

#include "platsmp.h"

static inline void versatile_immitation_enter_lowpower(unsigned int actrl_mask)
{
	unsigned int v;

	asm volatile(
	/*
	 * Invalidate the entire instruction cache and drain the
	 * write buffer (CP15 data synchronization barrier)
	 */
		"mcr	p15, 0, %1, c7, c5, 0\n"
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency: clear the requested bits in the
	 * auxiliary control register, then disable the data cache
	 * by clearing the C bit in the system control register
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %3\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (actrl_mask)
	  : "cc");
}

static inline void versatile_immitation_leave_lowpower(unsigned int actrl_mask)
{
	unsigned int v;

	asm volatile(
	/*
	 * Turn coherency back on: re-enable the data cache by setting
	 * the C bit in the system control register, then restore the
	 * requested bits in the auxiliary control register
	 */
		"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (actrl_mask)
	  : "cc");
}

static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious)
{
	/*
	 * There is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts.
	 *
	 * This code should not be used outside Versatile platforms.
	 */
	for (;;) {
		wfi();

		if (versatile_cpu_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
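
/*
 * For context, the wake-up side of the versatile_cpu_release handshake
 * polled above lives in the boot path: the waking CPU publishes the dying
 * CPU's logical map entry and then sends a wakeup IPI. A minimal sketch,
 * assuming a publish helper along the lines of platsmp.c's
 * versatile_write_cpu_release(); the function below is hypothetical and
 * for illustration only.
 */
#if 0	/* illustrative sketch, not built */
static void versatile_wake_cpu_sketch(unsigned int cpu)
{
	/* Publish the value the WFI loop above compares against. */
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/* Kick the core out of WFI with a wakeup IPI. */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
}
#endif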

/*
 * Platform-specific code to shut down a CPU.
 * This code supports imitation-style CPU hotplug for Versatile/RealView/
 * Versatile Express platforms that are unable to do real CPU hotplug.
 */
void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask)
{
	int spurious = 0;

	versatile_immitation_enter_lowpower(actrl_mask);
	versatile_immitation_do_lowpower(cpu, &spurious);
	versatile_immitation_leave_lowpower(actrl_mask);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
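
/*
 * Usage sketch: a platform hooks this function up as the cpu_die method of
 * its struct smp_operations. The wrapper, ops name, and ACTLR mask below
 * are hypothetical and for illustration only; the actual mask (typically
 * the core's SMP/coherency bit) depends on the CPU in use.
 */
#if 0	/* illustrative sketch, not built */
static void example_cpu_die(unsigned int cpu)
{
	/* 0x20 stands in for the core-specific ACTLR coherency bit. */
	versatile_immitation_cpu_die(cpu, 0x20);
}

static const struct smp_operations example_smp_ops __initconst = {
	/* .smp_boot_secondary etc. elided ... */
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die	= example_cpu_die,
#endif
};
#endif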