/*
 * ladder.c - the residency ladder algorithm
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
#include <linux/cpu.h>

#include <asm/io.h>
#include <linux/uaccess.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1

struct ladder_device_state {
	struct {
		u32 promotion_count;
		u32 demotion_count;
		u32 promotion_time;
		u32 demotion_time;
	} threshold;
	struct {
		int promotion_count;
		int demotion_count;
	} stats;
};

struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
	int last_state_idx;
};

static DEFINE_PER_CPU(struct ladder_device, ladder_devices);

/**
 * ladder_do_selection - prepares private data for a state change
 * @ldev: the ladder device
 * @old_idx: the current state index
 * @new_idx: the new target state index
 */
static inline void ladder_do_selection(struct ladder_device *ldev,
				       int old_idx, int new_idx)
{
	ldev->states[old_idx].stats.promotion_count = 0;
	ldev->states[old_idx].stats.demotion_count = 0;
	ldev->last_state_idx = new_idx;
}
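
/*
 * Overview of the ladder algorithm (summarizing the logic below): after
 * each idle period, ladder_select_state() compares the time actually
 * spent in the last state, minus that state's exit latency, against the
 * per-state thresholds.  PROMOTION_COUNT (4) consecutive residencies
 * above the promotion threshold step one state deeper; DEMOTION_COUNT
 * (1) residency below the demotion threshold steps one state back.
 * The governor therefore only ever moves one rung at a time, which is
 * why it suits periodic-tick (!NO_HZ) systems; see init_ladder() below.
 */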

/**
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 * @dummy: not used
 */
static int ladder_select_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev, bool *dummy)
{
	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
	struct device *device = get_cpu_device(dev->cpu);
	struct ladder_device_state *last_state;
	int last_residency, last_idx = ldev->last_state_idx;
	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int resume_latency = dev_pm_qos_raw_read_value(device);

	if (resume_latency < latency_req &&
	    resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		latency_req = resume_latency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		ladder_do_selection(ldev, last_idx, 0);
		return 0;
	}

	last_state = &ldev->states[last_idx];

	last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;

	/* consider promotion */
	if (last_idx < drv->state_count - 1 &&
	    !drv->states[last_idx + 1].disabled &&
	    !dev->states_usage[last_idx + 1].disable &&
	    last_residency > last_state->threshold.promotion_time &&
	    drv->states[last_idx + 1].exit_latency <= latency_req) {
		last_state->stats.promotion_count++;
		last_state->stats.demotion_count = 0;
		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx + 1);
			return last_idx + 1;
		}
	}

	/* consider demotion */
	if (last_idx > first_idx &&
	    (drv->states[last_idx].disabled ||
	    dev->states_usage[last_idx].disable ||
	    drv->states[last_idx].exit_latency > latency_req)) {
		int i;

		for (i = last_idx - 1; i > first_idx; i--) {
			if (drv->states[i].exit_latency <= latency_req)
				break;
		}
		ladder_do_selection(ldev, last_idx, i);
		return i;
	}

	if (last_idx > first_idx &&
	    last_residency < last_state->threshold.demotion_time) {
		last_state->stats.demotion_count++;
		last_state->stats.promotion_count = 0;
		if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx - 1);
			return last_idx - 1;
		}
	}

	/* otherwise remain at the current state */
	return last_idx;
}
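
/*
 * Worked example of the thresholds set up below (the latencies are
 * illustrative, not taken from any real driver): with C1 exit latency
 * 2us and C2 exit latency 10us, ladder_enable_device() sets C1's
 * promotion threshold to 2us and C2's demotion threshold to 10us.
 * Since last_residency has the exit latency already subtracted,
 * promotion from C1 to C2 needs four consecutive wakeups with a
 * measured C1 residency above 2us + 2us = 4us, while a single C2
 * residency below 10us + 10us = 20us demotes straight back to C1.
 */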

/**
 * ladder_enable_device - setup for the governor
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int ladder_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	int i;
	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
	struct ladder_device_state *lstate;
	struct cpuidle_state *state;

	ldev->last_state_idx = first_idx;

	for (i = first_idx; i < drv->state_count; i++) {
		state = &drv->states[i];
		lstate = &ldev->states[i];

		lstate->stats.promotion_count = 0;
		lstate->stats.demotion_count = 0;

		lstate->threshold.promotion_count = PROMOTION_COUNT;
		lstate->threshold.demotion_count = DEMOTION_COUNT;

		if (i < drv->state_count - 1)
			lstate->threshold.promotion_time = state->exit_latency;
		if (i > first_idx)
			lstate->threshold.demotion_time = state->exit_latency;
	}

	return 0;
}

/**
 * ladder_reflect - update the correct last_state_idx
 * @dev: the CPU
 * @index: the index of actual state entered
 */
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
	if (index > 0)
		ldev->last_state_idx = index;
}

static struct cpuidle_governor ladder_governor = {
	.name =		"ladder",
	.rating =	10,
	.enable =	ladder_enable_device,
	.select =	ladder_select_state,
	.reflect =	ladder_reflect,
};

/**
 * init_ladder - initializes the governor
 */
static int __init init_ladder(void)
{
	/*
	 * When NO_HZ is disabled, or when booting with nohz=off, the ladder
	 * governor is better so give it a higher rating than the menu
	 * governor.
	 */
	if (!tick_nohz_enabled)
		ladder_governor.rating = 25;

	return cpuidle_register_governor(&ladder_governor);
}

postcore_initcall(init_ladder);
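
/*
 * Usage sketch: once registered, the active governor can be inspected
 * and, if the kernel exposes the writable attribute, switched at
 * runtime via sysfs:
 *
 *   cat /sys/devices/system/cpu/cpuidle/current_governor_ro
 *   echo ladder > /sys/devices/system/cpu/cpuidle/current_governor
 *
 * On kernels of this vintage the writable current_governor file is
 * typically only present when booting with cpuidle_sysfs_switch.
 */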