/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/latency.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>

#define BREAK_FUZZ	4	/* 4 us */

struct menu_device {
	int		last_state_idx;

	unsigned int	expected_us;
	unsigned int	predicted_us;
	unsigned int	last_measured_us;
	unsigned int	elapsed_us;
};

static DEFINE_PER_CPU(struct menu_device, menu_devices);

/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int i;

	/* determine the expected residency time */
	data->expected_us =
		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;

	/* find the deepest idle state that satisfies our constraints */
	for (i = 1; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->expected_us)
			break;
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > system_latency_constraint())
			break;
	}

	data->last_state_idx = i - 1;
	return i - 1;
}
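
/*
 * Illustrative walkthrough of the selection loop above.  The numbers are
 * made up for this sketch and do not come from any real state table:
 * assume expected_us = 5000, predicted_us = 150, a system latency
 * constraint of 2000 us, and three non-polling states with
 * (target_residency, exit_latency) of (1, 1), (100, 50) and (1000, 300).
 * States 1 and 2 pass all three checks; state 3 fails the predicted_us
 * check because 1000 > 150, so the loop breaks at i = 3 and the governor
 * returns i - 1 = 2: the deepest state consistent with both the
 * timer-based bound and the measured-history prediction, whose exit
 * latency still respects the latency constraint.
 */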
/**
 * menu_reflect - attempts to guess what happened after entry
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int measured_us =
		cpuidle_get_last_residency(dev) + data->elapsed_us;
	struct cpuidle_state *target = &dev->states[last_idx];

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for one full standard timer tick.  However, be aware that this
	 * could potentially result in a suboptimal state transition.
	 */
	if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
		measured_us = USEC_PER_SEC / HZ;

	/* Predict time remaining until next break event */
	if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
		data->predicted_us = max(measured_us, data->last_measured_us);
		data->last_measured_us = measured_us;
		data->elapsed_us = 0;
	} else {
		if (data->elapsed_us < data->elapsed_us + measured_us)
			data->elapsed_us = measured_us;
		else
			data->elapsed_us = -1;
		data->predicted_us = max(measured_us, data->last_measured_us);
	}
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);
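
/*
 * Illustrative walkthrough of the bookkeeping in menu_reflect() above,
 * with example numbers that are not from the source: suppose the CPU
 * entered a state with exit_latency = 300 us, expected_us = 5000,
 * last_measured_us = 120 and elapsed_us = 0.
 *
 * - If the measured residency is 140 us, then measured_us = 140 and
 *   140 + BREAK_FUZZ = 144 < 5000 - 300, so the wakeup is treated as an
 *   early break event: predicted_us = max(140, 120) = 140,
 *   last_measured_us = 140 and elapsed_us is reset to 0.
 *
 * - If the measured residency is 4800 us instead, the test fails and the
 *   wakeup is attributed to the expected timer: elapsed_us is set to
 *   measured_us (which already folds in the previous elapsed_us, so
 *   partial sleeps across consecutive ticks accumulate, saturating at
 *   UINT_MAX via the -1 assignment on overflow) and predicted_us is
 *   still max(measured_us, last_measured_us).
 */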