/*
 *  cpuidle-powernv - idle state cpuidle driver.
 *  Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>

#define MAX_POWERNV_IDLE_STATES	8

struct cpuidle_driver powernv_idle_driver = {
	.name             = "powernv_idle",
	.owner            = THIS_MODULE,
};

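/*
 * Number of usable entries in cpuidle_state_table and the table itself,
 * both filled in at probe time.
 */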
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;

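/*
 * Snooze: busy-poll at low SMT thread priority until a reschedule is
 * pending; the cheapest state, with essentially no wakeup latency.
 */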
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}

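/*
 * Nap: hardware idle state entered via power7_idle(); the runlatch is
 * dropped around it so the thread is reported as idle.
 */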
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}

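/*
 * Fast Sleep: deeper idle state. LPCR_PECE1 is cleared so the decrementer
 * does not wake the thread; the state is registered with
 * CPUIDLE_FLAG_TIMER_STOP, so timer wakeups are offloaded.
 */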
static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;
	/*
	 * Do not exit powersave upon decrementer as we've setup the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);
	power7_sleep();

	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}

/*
 * Table of idle states. Snooze is always present; deeper states (Nap,
 * FastSleep) are filled in from the device tree by powernv_add_idle_states().
 */
static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
};

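/*
 * CPU hotplug callback: enable the per-cpu cpuidle device when a CPU comes
 * online and disable it once the CPU is dead.
 */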
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
				per_cpu(cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = powernv_cpuidle_add_cpu_notifier,
};

/*
 * powernv_cpuidle_driver_init(): copy the enabled states from
 * cpuidle_state_table into the driver, skipping entries that have no
 * enter() callback.
 */
static int powernv_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &powernv_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

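/*
 * Discover additional idle states from the OPAL device tree node
 * /ibm,opal/power-mgt and append them after the static snooze entry.
 * Returns the total number of states, which becomes max_idle_state.
 */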
static int powernv_add_idle_states(void)
{
	struct device_node *power_mgt;
	int nr_idle_states = 1; /* Snooze */
	int dt_idle_states;
	const __be32 *idle_state_flags;
	const __be32 *idle_state_latency;
	u32 flags, latency_ns;
	int len_flags;
	int i;

	/* Currently we have snooze statically defined */

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("opal: PowerMgmt Node not found\n");
		return nr_idle_states;
	}

	idle_state_flags = of_get_property(power_mgt,
			"ibm,cpu-idle-state-flags", &len_flags);
	if (!idle_state_flags) {
		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
		return nr_idle_states;
	}

	idle_state_latency = of_get_property(power_mgt,
			"ibm,cpu-idle-state-latencies-ns", NULL);
	if (!idle_state_latency) {
		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
		return nr_idle_states;
	}

	dt_idle_states = len_flags / sizeof(u32);

	for (i = 0; i < dt_idle_states; i++) {
		flags = be32_to_cpu(idle_state_flags[i]);

		/*
		 * Cpuidle accepts exit_latency in us and we estimate
		 * target residency to be 10x exit_latency.
		 */
		latency_ns = be32_to_cpu(idle_state_latency[i]);
		if (flags & OPAL_PM_NAP_ENABLED) {
			/* Add NAP state */
			strcpy(powernv_states[nr_idle_states].name, "Nap");
			strcpy(powernv_states[nr_idle_states].desc, "Nap");
			powernv_states[nr_idle_states].flags = 0;
			powernv_states[nr_idle_states].exit_latency =
					((unsigned int)latency_ns) / 1000;
			powernv_states[nr_idle_states].target_residency =
					((unsigned int)latency_ns / 100);
			powernv_states[nr_idle_states].enter = &nap_loop;
			nr_idle_states++;
		}

		if (flags & OPAL_PM_SLEEP_ENABLED ||
			flags & OPAL_PM_SLEEP_ENABLED_ER1) {
			/* Add FASTSLEEP state */
			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
			strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
			powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
			powernv_states[nr_idle_states].exit_latency =
					((unsigned int)latency_ns) / 1000;
			powernv_states[nr_idle_states].target_residency =
					((unsigned int)latency_ns / 100);
			powernv_states[nr_idle_states].enter = &fastsleep_loop;
			nr_idle_states++;
		}
	}

	return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Check for OPAL firmware, select the PowerNV state table and let the
 * device tree add further idle states.
 */
static int powernv_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_OPALv3)) {
		cpuidle_state_table = powernv_states;
		/* Device tree can indicate more idle states */
		max_idle_state = powernv_add_idle_states();
	} else
		return -ENODEV;

	return 0;
}

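/*
 * Probe for OPAL, build the driver's state table, register with cpuidle
 * and hook into CPU hotplug notifications.
 */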
static int __init powernv_processor_idle_init(void)
{
	int retval;

	retval = powernv_idle_probe();
	if (retval)
		return retval;

	powernv_cpuidle_driver_init();
	retval = cpuidle_register(&powernv_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "powernv_idle_driver registered\n");
	return 0;
}

device_initcall(powernv_processor_idle_init);