// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@android.com>
 */

#include <linux/kernel.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

/*
 * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
 * Notifications for cpu_pm will be issued by the idle task itself, which can
 * never block, IOW it requires using a raw_spinlock_t.
 */
static struct {
	struct raw_notifier_head chain;
	raw_spinlock_t lock;
} cpu_pm_notifier = {
	.chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
	.lock  = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
};

static int cpu_pm_notify(enum cpu_pm_event event)
{
	int ret;

	/*
	 * This introduces an RCU read-side critical section, which could be
	 * dysfunctional when called from the idle loop, where RCU is not
	 * watching the CPU. Open-code the RCU_NONIDLE() helper to let RCU
	 * know about it.
	 */
	rcu_irq_enter_irqson();
	rcu_read_lock();
	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
	rcu_read_unlock();
	rcu_irq_exit_irqson();

	return notifier_to_errno(ret);
}
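
/*
 * For reference, a sketch of the same pattern written with the
 * RCU_NONIDLE() helper from <linux/rcupdate.h>, which wraps a statement
 * in the rcu_irq_enter_irqson()/rcu_irq_exit_irqson() pair open-coded
 * above (illustration only, not part of this file):
 *
 *	RCU_NONIDLE({
 *		rcu_read_lock();
 *		ret = raw_notifier_call_chain(&cpu_pm_notifier.chain,
 *					      event, NULL);
 *		rcu_read_unlock();
 *	});
 */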

/*
 * As cpu_pm_notify(), but on failure the callbacks that already saw
 * @event_up are unwound by calling them again with @event_down. The
 * chain lock is held across the call so the chain cannot change between
 * the forward walk and a potential unwind.
 */
static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
	unsigned long flags;
	int ret;

	rcu_irq_enter_irqson();
	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	rcu_irq_exit_irqson();

	return notifier_to_errno(ret);
}

/**
 * cpu_pm_register_notifier - register a driver with cpu_pm
 * @nb: notifier block to register
 *
 * Add a driver to a list of drivers that are notified about
 * CPU and CPU cluster low power entry and exit.
 *
 * This function has the same return conditions as raw_notifier_chain_register.
 */
int cpu_pm_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
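
/*
 * A minimal sketch, for a hypothetical driver "foo", of how a client
 * typically uses this API. The callback runs with interrupts disabled,
 * so it must not block, sleep, or allocate memory (illustration only,
 * not part of this file):
 */
#if 0	/* example */
static int foo_cpu_pm_callback(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-CPU hardware state the low power state may lose */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore the state saved on CPU_PM_ENTER */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_pm_nb = {
	.notifier_call = foo_cpu_pm_callback,
};

static int __init foo_init(void)
{
	return cpu_pm_register_notifier(&foo_cpu_pm_nb);
}
#endif	/* example */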

/**
 * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
 * @nb: notifier block to be unregistered
 *
 * Remove a driver from the CPU PM notifier list.
 *
 * This function has the same return conditions as raw_notifier_chain_unregister.
 */
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
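
/*
 * Continuing the hypothetical "foo" example above: unregistration from
 * the driver's teardown path (illustration only, not part of this file):
 */
#if 0	/* example */
static void foo_exit(void)
{
	cpu_pm_unregister_notifier(&foo_cpu_pm_nb);
}
#endif	/* example */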

/**
 * cpu_pm_enter - CPU low power entry notifier
 *
 * Notifies listeners that a single CPU is entering a low power state that may
 * cause some blocks in the same power domain as the CPU to reset.
 *
 * Must be called on the affected CPU with interrupts disabled. The platform
 * is responsible for ensuring that cpu_pm_enter is not called twice on the
 * same CPU before cpu_pm_exit is called. Notified drivers can include the
 * VFP co-processor, the interrupt controller and its PM extensions, and
 * local CPU timers context save/restore, which must not be interrupted;
 * hence the requirement for disabled interrupts.
 *
 * Return conditions are same as raw_notifier_call_chain.
 */
int cpu_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);

/**
 * cpu_pm_exit - CPU low power exit notifier
 *
 * Notifies listeners that a single CPU is exiting a low power state that may
 * have caused some blocks in the same power domain as the CPU to reset.
 *
 * Notified drivers can include the VFP co-processor, the interrupt
 * controller and its PM extensions, and local CPU timers context
 * save/restore, which must not be interrupted; hence it must be called
 * with interrupts disabled.
 *
 * Return conditions are same as raw_notifier_call_chain.
 */
int cpu_pm_exit(void)
{
	return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);
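
/*
 * A minimal sketch, for a hypothetical platform idle driver, of the
 * expected calling sequence: cpu_pm_enter()/cpu_pm_exit() bracket the
 * low power entry, and a non-zero return from cpu_pm_enter() means a
 * notifier vetoed the transition, so the low power state must not be
 * entered (illustration only, not part of this file):
 */
#if 0	/* example */
static int foo_enter_lowpower(void)
{
	int ret;

	ret = cpu_pm_enter();		/* interrupts already disabled here */
	if (ret)
		return ret;		/* abort: a notifier refused */

	/* ... platform-specific entry into the low power state ... */

	cpu_pm_exit();			/* restore state after wakeup */
	return 0;
}
#endif	/* example */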

/**
 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
 *
 * Notifies listeners that all CPUs in a power domain are entering a low power
 * state that may cause some blocks in the same power domain to reset.
 *
 * Must be called after cpu_pm_enter has been called on all CPUs in the power
 * domain, and before cpu_pm_exit has been called on any CPU in the power
 * domain. Notified drivers can include the VFP co-processor, the interrupt
 * controller and its PM extensions, and local CPU timers context
 * save/restore, which must not be interrupted; hence it must be called
 * with interrupts disabled.
 *
 * Return conditions are same as raw_notifier_call_chain.
 */
int cpu_cluster_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);

/**
 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
 *
 * Notifies listeners that all CPUs in a power domain are exiting from a
 * low power state that may have caused some blocks in the same power domain
 * to reset.
 *
 * Must be called after cpu_cluster_pm_enter has been called for the power
 * domain, and before cpu_pm_exit has been called on any CPU in the power
 * domain. Notified drivers can include the VFP co-processor, the interrupt
 * controller and its PM extensions, and local CPU timers context
 * save/restore, which must not be interrupted; hence it must be called
 * with interrupts disabled.
 *
 * Return conditions are same as raw_notifier_call_chain.
 */
int cpu_cluster_pm_exit(void)
{
	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
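
/*
 * A minimal sketch of the cluster-level ordering described above, for a
 * hypothetical platform where the last CPU to idle powers down the whole
 * cluster ("last man down"); whether this CPU is the last one is passed
 * in by the caller (illustration only, not part of this file):
 */
#if 0	/* example */
static int foo_enter_cluster_lowpower(bool last_cpu)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;

	if (last_cpu) {
		ret = cpu_cluster_pm_enter();
		if (ret) {
			cpu_pm_exit();	/* unwind the per-CPU notification */
			return ret;
		}
	}

	/* ... enter the CPU (and possibly cluster) low power state ... */

	if (last_cpu)
		cpu_cluster_pm_exit();	/* before any cpu_pm_exit() */
	cpu_pm_exit();
	return 0;
}
#endif	/* example */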

#ifdef CONFIG_PM
/*
 * Syscore ops run on one CPU, with interrupts disabled, late in system
 * suspend and early in resume, so the CPU and cluster PM notifiers also
 * fire across system-wide low power transitions.
 */
static int cpu_pm_suspend(void)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;

	return cpu_cluster_pm_enter();
}

static void cpu_pm_resume(void)
{
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}

static struct syscore_ops cpu_pm_syscore_ops = {
	.suspend = cpu_pm_suspend,
	.resume = cpu_pm_resume,
};

static int cpu_pm_init(void)
{
	register_syscore_ops(&cpu_pm_syscore_ops);
	return 0;
}
core_initcall(cpu_pm_init);
#endif