/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/tick.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "soc.h"
#include "clockdomain.h"

#define MAX_CPUS	2

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
	u32 mpu_state_vote;
};

static struct idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct idle_statedata omap5_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

static atomic_t abort_barrier;
static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
static DEFINE_RAW_SPINLOCK(mpu_lock);

/* Private functions */

/**
 * omap_enter_idle_[simple/smp/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}

static int omap_enter_idle_smp(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv,
			       int index)
{
	struct idle_statedata *cx = state_ptr + index;
	unsigned long flag;

	/*
	 * Program the MPU power domain only once every online CPU
	 * has voted for this state.
	 */
	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;
	if (cx->mpu_state_vote == num_online_cpus()) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	/* The first CPU to wake up restores the MPU power domain to ON. */
	raw_spin_lock_irqsave(&mpu_lock, flag);
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	return index;
}

static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in the OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;
		}
	}

	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	tick_broadcast_enable();

	/* Enter broadcast mode for one-shot timers */
	tick_broadcast_enter();

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context)
			cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wake up CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		clkdm_deny_idle(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	tick_broadcast_exit();

fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}

static struct cpuidle_driver omap4_idle_driver = {
	.name = "omap4_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};

static struct cpuidle_driver omap5_idle_driver = {
	.name = "omap5_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	struct cpuidle_driver *idle_driver;

	if (soc_is_omap54xx()) {
		state_ptr = &omap5_idle_data[0];
		idle_driver = &omap5_idle_driver;
	} else {
		state_ptr = &omap4_idle_data[0];
		idle_driver = &omap4_idle_driver;
	}

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	return cpuidle_register(idle_driver, cpu_online_mask);
}