--- cpuidle.c (cdd38c5f1ce4398ec58fec95904b75824daab7b5)
+++ cpuidle.c (e67198cc05b8ecbb7b8e2d8ef9fb5c8d26821873)
 /*
  * cpuidle.c - core cpuidle infrastructure
  *
  * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  *               Shaohua Li <shaohua.li@intel.com>
  *               Adam Belay <abelay@novell.com>
  *
  * This code is licenced under the GPL.

--- 9 unchanged lines hidden ---

 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/tick.h>
 #include <linux/mmu_context.h>
+#include <linux/context_tracking.h>
 #include <trace/events/power.h>

 #include "cpuidle.h"

 DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
 DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

 DEFINE_MUTEX(cpuidle_lock);

--- 111 unchanged lines hidden ---

         tick_freeze();
         /*
          * The state used here cannot be a "coupled" one, because the "coupled"
          * cpuidle mechanism enables interrupts and doing that with timekeeping
          * suspended is generally unsafe.
          */
         stop_critical_timings();
         if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-                rcu_idle_enter();
+                ct_idle_enter();
         target_state->enter_s2idle(dev, drv, index);
         if (WARN_ON_ONCE(!irqs_disabled()))
                 local_irq_disable();
         if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-                rcu_idle_exit();
+                ct_idle_exit();
         tick_unfreeze();
         start_critical_timings();

         time_end = ns_to_ktime(local_clock());

         dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
         dev->states_usage[index].s2idle_usage++;
 }

--- 61 unchanged lines hidden ---

         /* Take note of the planned idle state. */
         sched_idle_set_state(target_state);

         trace_cpu_idle(index, dev->cpu);
         time_start = ns_to_ktime(local_clock());

         stop_critical_timings();
         if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-                rcu_idle_enter();
+                ct_idle_enter();
         entered_state = target_state->enter(dev, drv, index);
         if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
-                rcu_idle_exit();
+                ct_idle_exit();
         start_critical_timings();

         sched_clock_idle_wakeup_event();
         time_end = ns_to_ktime(local_clock());
         trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

         /* The cpu is no longer idle or about to enter idle. */
         sched_idle_set_state(NULL);

--- 532 unchanged lines hidden ---
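
The CPUIDLE_FLAG_RCU_IDLE checks retained above exist because some idle states perform the context-tracking (formerly RCU) idle transition inside their own ->enter() callback, so the core must not enter that section a second time. Below is a minimal driver-side sketch of such a state; the names, latency values and empty low-power body are illustrative assumptions, not taken from this file.

/*
 * Hypothetical sketch only: a state that sets CPUIDLE_FLAG_RCU_IDLE and
 * therefore calls ct_idle_enter()/ct_idle_exit() itself, instead of relying
 * on the core cpuidle_enter_state()/enter_s2idle_proper() wrappers above.
 */
#include <linux/context_tracking.h>
#include <linux/cpuidle.h>

static int example_enter_deep(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int index)
{
        ct_idle_enter();        /* state manages context tracking itself */
        /* platform-specific low-power entry would happen here */
        ct_idle_exit();
        return index;
}

static struct cpuidle_state example_deep_state = {
        .name              = "example-deep",
        .desc              = "hypothetical state handling RCU/CT itself",
        .flags             = CPUIDLE_FLAG_RCU_IDLE,
        .exit_latency      = 200,
        .target_residency  = 1000,
        .enter             = example_enter_deep,
};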