xref: /openbmc/linux/drivers/cpufreq/cpufreq.c (revision f963735a3ca388da4893fc2d463eca6b58667add)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/drivers/cpufreq/cpufreq.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 2001 Russell King
51da177e4SLinus Torvalds  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6bb176f7dSViresh Kumar  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
71da177e4SLinus Torvalds  *
8c32b6b8eSAshok Raj  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9c32b6b8eSAshok Raj  *	Added handling for CPU hotplug
108ff69732SDave Jones  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
118ff69732SDave Jones  *	Fix handling for CPU hotplug -- affected CPUs
12c32b6b8eSAshok Raj  *
131da177e4SLinus Torvalds  * This program is free software; you can redistribute it and/or modify
141da177e4SLinus Torvalds  * it under the terms of the GNU General Public License version 2 as
151da177e4SLinus Torvalds  * published by the Free Software Foundation.
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19db701151SViresh Kumar 
205ff0a268SViresh Kumar #include <linux/cpu.h>
211da177e4SLinus Torvalds #include <linux/cpufreq.h>
221da177e4SLinus Torvalds #include <linux/delay.h>
231da177e4SLinus Torvalds #include <linux/device.h>
245ff0a268SViresh Kumar #include <linux/init.h>
255ff0a268SViresh Kumar #include <linux/kernel_stat.h>
265ff0a268SViresh Kumar #include <linux/module.h>
273fc54d37Sakpm@osdl.org #include <linux/mutex.h>
285ff0a268SViresh Kumar #include <linux/slab.h>
292f0aea93SViresh Kumar #include <linux/suspend.h>
3090de2a4aSDoug Anderson #include <linux/syscore_ops.h>
315ff0a268SViresh Kumar #include <linux/tick.h>
326f4f2723SThomas Renninger #include <trace/events/power.h>
336f4f2723SThomas Renninger 
34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list);
35*f963735aSViresh Kumar 
36*f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37*f963735aSViresh Kumar {
38*f963735aSViresh Kumar 	return cpumask_empty(policy->cpus);
39*f963735aSViresh Kumar }
40*f963735aSViresh Kumar 
41*f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42*f963735aSViresh Kumar {
43*f963735aSViresh Kumar 	return active == !policy_is_inactive(policy);
44*f963735aSViresh Kumar }
45*f963735aSViresh Kumar 
46*f963735aSViresh Kumar /* Finds next active/inactive policy */
47*f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48*f963735aSViresh Kumar 					  bool active)
49*f963735aSViresh Kumar {
50*f963735aSViresh Kumar 	do {
51*f963735aSViresh Kumar 		policy = list_next_entry(policy, policy_list);
52*f963735aSViresh Kumar 
53*f963735aSViresh Kumar 		/* No more policies in the list */
54*f963735aSViresh Kumar 		if (&policy->policy_list == &cpufreq_policy_list)
55*f963735aSViresh Kumar 			return NULL;
56*f963735aSViresh Kumar 	} while (!suitable_policy(policy, active));
57*f963735aSViresh Kumar 
58*f963735aSViresh Kumar 	return policy;
59*f963735aSViresh Kumar }
60*f963735aSViresh Kumar 
61*f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active)
62*f963735aSViresh Kumar {
63*f963735aSViresh Kumar 	struct cpufreq_policy *policy;
64*f963735aSViresh Kumar 
65*f963735aSViresh Kumar 	/* No policies in the list */
66*f963735aSViresh Kumar 	if (list_empty(&cpufreq_policy_list))
67*f963735aSViresh Kumar 		return NULL;
68*f963735aSViresh Kumar 
69*f963735aSViresh Kumar 	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70*f963735aSViresh Kumar 				  policy_list);
71*f963735aSViresh Kumar 
72*f963735aSViresh Kumar 	if (!suitable_policy(policy, active))
73*f963735aSViresh Kumar 		policy = next_policy(policy, active);
74*f963735aSViresh Kumar 
75*f963735aSViresh Kumar 	return policy;
76*f963735aSViresh Kumar }
77*f963735aSViresh Kumar 
78*f963735aSViresh Kumar /* Macros to iterate over CPU policies */
79*f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active)	\
80*f963735aSViresh Kumar 	for (__policy = first_policy(__active);		\
81*f963735aSViresh Kumar 	     __policy;					\
82*f963735aSViresh Kumar 	     __policy = next_policy(__policy, __active))
83*f963735aSViresh Kumar 
84*f963735aSViresh Kumar #define for_each_active_policy(__policy)		\
85*f963735aSViresh Kumar 	for_each_suitable_policy(__policy, true)
86*f963735aSViresh Kumar #define for_each_inactive_policy(__policy)		\
87*f963735aSViresh Kumar 	for_each_suitable_policy(__policy, false)
88*f963735aSViresh Kumar 
89b4f0676fSViresh Kumar #define for_each_policy(__policy)			\
90b4f0676fSViresh Kumar 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91b4f0676fSViresh Kumar 
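/*
 * Editorial sketch (not part of the original file): core code can walk only
 * the active policies using the iterator defined above.  For instance,
 * printing every CPU that currently owns an active policy:
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("CPU%u has an active policy\n", policy->cpu);
 *
 * for_each_inactive_policy() walks the remaining policies, i.e. those whose
 * policy->cpus mask is empty.
 */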
92f7b27061SViresh Kumar /* Iterate over governors */
93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list);
94f7b27061SViresh Kumar #define for_each_governor(__governor)				\
95f7b27061SViresh Kumar 	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96f7b27061SViresh Kumar 
971da177e4SLinus Torvalds /**
98cd878479SDave Jones  * The "cpufreq driver" - the arch- or hardware-dependent low
991da177e4SLinus Torvalds  * level driver of CPUFreq support, and its spinlock. This lock
1001da177e4SLinus Torvalds  * also protects the cpufreq_cpu_data array.
1011da177e4SLinus Torvalds  */
1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver;
1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
1048414809cSSrivatsa S. Bhat static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
105bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock);
1066f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock);
107bb176f7dSViresh Kumar 
108084f3493SThomas Renninger /* This one keeps track of the previously set governor of a removed CPU */
109e77b89f1SDmitry Monakhov static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
1101da177e4SLinus Torvalds 
1112f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */
1122f0aea93SViresh Kumar static bool cpufreq_suspended;
1131da177e4SLinus Torvalds 
1149c0ebcf7SViresh Kumar static inline bool has_target(void)
1159c0ebcf7SViresh Kumar {
1169c0ebcf7SViresh Kumar 	return cpufreq_driver->target_index || cpufreq_driver->target;
1179c0ebcf7SViresh Kumar }
1189c0ebcf7SViresh Kumar 
1195a01f2e8SVenkatesh Pallipadi /*
1206eed9404SViresh Kumar  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
1216eed9404SViresh Kumar  * sections
1226eed9404SViresh Kumar  */
1236eed9404SViresh Kumar static DECLARE_RWSEM(cpufreq_rwsem);
1246eed9404SViresh Kumar 
1251da177e4SLinus Torvalds /* internal prototypes */
12629464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy,
12729464f28SDave Jones 		unsigned int event);
128d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
12965f27f38SDavid Howells static void handle_update(struct work_struct *work);
1301da177e4SLinus Torvalds 
1311da177e4SLinus Torvalds /**
1321da177e4SLinus Torvalds  * Two notifier lists: the "policy" list is involved in the
1331da177e4SLinus Torvalds  * validation process for a new CPU frequency policy; the
1341da177e4SLinus Torvalds  * "transition" list for kernel code that needs to handle
1351da177e4SLinus Torvalds  * changes to devices when the CPU clock speed changes.
1361da177e4SLinus Torvalds  * The mutex locks both lists.
1371da177e4SLinus Torvalds  */
138e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
139b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list;
1401da177e4SLinus Torvalds 
14174212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called;
142b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void)
143b4dfdbb3SAlan Stern {
144b4dfdbb3SAlan Stern 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
14574212ca4SCesar Eduardo Barros 	init_cpufreq_transition_notifier_list_called = true;
146b4dfdbb3SAlan Stern 	return 0;
147b4dfdbb3SAlan Stern }
148b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list);
1491da177e4SLinus Torvalds 
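/*
 * Editorial sketch (assumed client code, not part of this file): kernel code
 * hooks into the "transition" list above through cpufreq_register_notifier().
 * A minimal notifier could look like this:
 *
 *	static int my_freq_notifier(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u now runs at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_freq_nb = {
 *		.notifier_call = my_freq_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * The names prefixed with "my_" are hypothetical.
 */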
150a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly;
151da584455SViresh Kumar static int cpufreq_disabled(void)
152a7b422cdSKonrad Rzeszutek Wilk {
153a7b422cdSKonrad Rzeszutek Wilk 	return off;
154a7b422cdSKonrad Rzeszutek Wilk }
155a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void)
156a7b422cdSKonrad Rzeszutek Wilk {
157a7b422cdSKonrad Rzeszutek Wilk 	off = 1;
158a7b422cdSKonrad Rzeszutek Wilk }
1593fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex);
1601da177e4SLinus Torvalds 
1614d5dcc42SViresh Kumar bool have_governor_per_policy(void)
1624d5dcc42SViresh Kumar {
1630b981e70SViresh Kumar 	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
1644d5dcc42SViresh Kumar }
1653f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy);
1664d5dcc42SViresh Kumar 
167944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
168944e9a03SViresh Kumar {
169944e9a03SViresh Kumar 	if (have_governor_per_policy())
170944e9a03SViresh Kumar 		return &policy->kobj;
171944e9a03SViresh Kumar 	else
172944e9a03SViresh Kumar 		return cpufreq_global_kobject;
173944e9a03SViresh Kumar }
174944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
175944e9a03SViresh Kumar 
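/*
 * Editorial sketch (assumed governor code, not part of this file): a governor
 * with sysfs tunables creates them under whatever kobject the helper above
 * returns, so the same code works whether governors are global or per-policy:
 *
 *	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 *				 my_governor_attr_group);
 *
 * my_governor_attr_group is a placeholder for the governor's
 * struct attribute_group.
 */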
17672a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
17772a4ce34SViresh Kumar {
17872a4ce34SViresh Kumar 	u64 idle_time;
17972a4ce34SViresh Kumar 	u64 cur_wall_time;
18072a4ce34SViresh Kumar 	u64 busy_time;
18172a4ce34SViresh Kumar 
18272a4ce34SViresh Kumar 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
18372a4ce34SViresh Kumar 
18472a4ce34SViresh Kumar 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
18572a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
18672a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
18772a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
18872a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
18972a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
19072a4ce34SViresh Kumar 
19172a4ce34SViresh Kumar 	idle_time = cur_wall_time - busy_time;
19272a4ce34SViresh Kumar 	if (wall)
19372a4ce34SViresh Kumar 		*wall = cputime_to_usecs(cur_wall_time);
19472a4ce34SViresh Kumar 
19572a4ce34SViresh Kumar 	return cputime_to_usecs(idle_time);
19672a4ce34SViresh Kumar }
19772a4ce34SViresh Kumar 
19872a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
19972a4ce34SViresh Kumar {
20072a4ce34SViresh Kumar 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
20172a4ce34SViresh Kumar 
20272a4ce34SViresh Kumar 	if (idle_time == -1ULL)
20372a4ce34SViresh Kumar 		return get_cpu_idle_time_jiffy(cpu, wall);
20472a4ce34SViresh Kumar 	else if (!io_busy)
20572a4ce34SViresh Kumar 		idle_time += get_cpu_iowait_time_us(cpu, wall);
20672a4ce34SViresh Kumar 
20772a4ce34SViresh Kumar 	return idle_time;
20872a4ce34SViresh Kumar }
20972a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time);
21072a4ce34SViresh Kumar 
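/*
 * Editorial sketch (hypothetical snippet, not part of this file): governors
 * typically derive CPU load from two successive get_cpu_idle_time() samples.
 * Both return values are in microseconds, so the ratio is unit-free:
 *
 *	u64 wall, idle, d_wall, d_idle;
 *	unsigned int load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, io_busy);
 *	d_wall = wall - prev_wall;
 *	d_idle = idle - prev_idle;
 *	prev_wall = wall;
 *	prev_idle = idle;
 *	load = d_wall ? div64_u64(100 * (d_wall - d_idle), d_wall) : 0;
 *
 * prev_wall and prev_idle hold the values from the previous sampling period.
 */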
21170e9e778SViresh Kumar /*
21270e9e778SViresh Kumar  * This is a generic cpufreq init() routine which can be used by cpufreq
21370e9e778SViresh Kumar  * drivers of SMP systems. It will do the following:
21470e9e778SViresh Kumar  * - validate & show the frequency table passed
21570e9e778SViresh Kumar  * - set the policy's transition latency
21670e9e778SViresh Kumar  * - fill policy->cpus with all possible CPUs
21770e9e778SViresh Kumar  */
21870e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy,
21970e9e778SViresh Kumar 		struct cpufreq_frequency_table *table,
22070e9e778SViresh Kumar 		unsigned int transition_latency)
22170e9e778SViresh Kumar {
22270e9e778SViresh Kumar 	int ret;
22370e9e778SViresh Kumar 
22470e9e778SViresh Kumar 	ret = cpufreq_table_validate_and_show(policy, table);
22570e9e778SViresh Kumar 	if (ret) {
22670e9e778SViresh Kumar 		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
22770e9e778SViresh Kumar 		return ret;
22870e9e778SViresh Kumar 	}
22970e9e778SViresh Kumar 
23070e9e778SViresh Kumar 	policy->cpuinfo.transition_latency = transition_latency;
23170e9e778SViresh Kumar 
23270e9e778SViresh Kumar 	/*
23370e9e778SViresh Kumar 	 * The driver only supports the SMP configuration where all processors
23470e9e778SViresh Kumar 	 * share the clock and voltage.
23570e9e778SViresh Kumar 	 */
23670e9e778SViresh Kumar 	cpumask_setall(policy->cpus);
23770e9e778SViresh Kumar 
23870e9e778SViresh Kumar 	return 0;
23970e9e778SViresh Kumar }
24070e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init);
24170e9e778SViresh Kumar 
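/*
 * Editorial sketch (hypothetical driver, not part of this file): for a simple
 * SMP platform the driver's ->init() callback is usually little more than a
 * wrapper around the helper above:
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = my_cpu_clk;
 *		return cpufreq_generic_init(policy, my_freq_table, 100000);
 *	}
 *
 * my_cpu_clk and my_freq_table are made-up names, and 100000 ns (100 us) is
 * just an example transition latency.  Setting policy->clk also lets such a
 * driver use cpufreq_generic_get() below as its ->get() callback.
 */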
242652ed95dSViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu)
243652ed95dSViresh Kumar {
244652ed95dSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245652ed95dSViresh Kumar 
246652ed95dSViresh Kumar 	if (!policy || IS_ERR(policy->clk)) {
247e837f9b5SJoe Perches 		pr_err("%s: No %s associated to cpu: %d\n",
248e837f9b5SJoe Perches 		       __func__, policy ? "clk" : "policy", cpu);
249652ed95dSViresh Kumar 		return 0;
250652ed95dSViresh Kumar 	}
251652ed95dSViresh Kumar 
252652ed95dSViresh Kumar 	return clk_get_rate(policy->clk) / 1000;
253652ed95dSViresh Kumar }
254652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get);
255652ed95dSViresh Kumar 
256e0b3165bSViresh Kumar /* Only for cpufreq core internal use */
257e0b3165bSViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
258e0b3165bSViresh Kumar {
259e0b3165bSViresh Kumar 	return per_cpu(cpufreq_cpu_data, cpu);
260e0b3165bSViresh Kumar }
261e0b3165bSViresh Kumar 
26250e9c852SViresh Kumar /**
26350e9c852SViresh Kumar  * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
26450e9c852SViresh Kumar  *
26550e9c852SViresh Kumar  * @cpu: cpu to find policy for.
26650e9c852SViresh Kumar  *
26750e9c852SViresh Kumar  * This returns the policy for 'cpu', or NULL if it doesn't exist.
26850e9c852SViresh Kumar  * It also increments the kobject reference count to mark the policy busy,
26950e9c852SViresh Kumar  * so a corresponding call to cpufreq_cpu_put() is required to decrement it.
27050e9c852SViresh Kumar  * If that cpufreq_cpu_put() call isn't made, the policy won't be
27150e9c852SViresh Kumar  * freed, as freeing depends on the kobject count dropping to zero.
27250e9c852SViresh Kumar  *
27350e9c852SViresh Kumar  * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
27450e9c852SViresh Kumar  * valid policy is found. This is done to make sure the driver doesn't get
27550e9c852SViresh Kumar  * unregistered while the policy is being used.
27650e9c852SViresh Kumar  *
27750e9c852SViresh Kumar  * Return: A valid policy on success, otherwise NULL on failure.
27850e9c852SViresh Kumar  */
2796eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
2801da177e4SLinus Torvalds {
2816eed9404SViresh Kumar 	struct cpufreq_policy *policy = NULL;
2821da177e4SLinus Torvalds 	unsigned long flags;
2831da177e4SLinus Torvalds 
2841b947c90SViresh Kumar 	if (WARN_ON(cpu >= nr_cpu_ids))
2856eed9404SViresh Kumar 		return NULL;
2866eed9404SViresh Kumar 
2876eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
2886eed9404SViresh Kumar 		return NULL;
2891da177e4SLinus Torvalds 
2901da177e4SLinus Torvalds 	/* get the cpufreq driver */
2910d1857a1SNathan Zimmer 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2921da177e4SLinus Torvalds 
2936eed9404SViresh Kumar 	if (cpufreq_driver) {
2941da177e4SLinus Torvalds 		/* get the CPU */
2953a3e9e06SViresh Kumar 		policy = per_cpu(cpufreq_cpu_data, cpu);
2966eed9404SViresh Kumar 		if (policy)
2976eed9404SViresh Kumar 			kobject_get(&policy->kobj);
2986eed9404SViresh Kumar 	}
2996eed9404SViresh Kumar 
3006eed9404SViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
3011da177e4SLinus Torvalds 
3023a3e9e06SViresh Kumar 	if (!policy)
3036eed9404SViresh Kumar 		up_read(&cpufreq_rwsem);
3041da177e4SLinus Torvalds 
3053a3e9e06SViresh Kumar 	return policy;
306a9144436SStephen Boyd }
3071da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
3081da177e4SLinus Torvalds 
30950e9c852SViresh Kumar /**
31050e9c852SViresh Kumar  * cpufreq_cpu_put: Decrements the usage count of a policy
31150e9c852SViresh Kumar  *
31250e9c852SViresh Kumar  * @policy: policy earlier returned by cpufreq_cpu_get().
31350e9c852SViresh Kumar  *
31450e9c852SViresh Kumar  * This decrements the kobject reference count incremented earlier by calling
31550e9c852SViresh Kumar  * cpufreq_cpu_get().
31650e9c852SViresh Kumar  *
31750e9c852SViresh Kumar  * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
31850e9c852SViresh Kumar  */
3193a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy)
320a9144436SStephen Boyd {
3216eed9404SViresh Kumar 	kobject_put(&policy->kobj);
3226eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
323a9144436SStephen Boyd }
3241da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
3251da177e4SLinus Torvalds 
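/*
 * Editorial sketch (assumed client code, not part of this file): the two
 * helpers above must always be paired, so that both the kobject reference
 * and the read-lock on cpufreq_rwsem are dropped again:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_debug("CPU%u: %u..%u kHz\n", cpu, policy->min, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */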
3261da177e4SLinus Torvalds /*********************************************************************
3271da177e4SLinus Torvalds  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
3281da177e4SLinus Torvalds  *********************************************************************/
3291da177e4SLinus Torvalds 
3301da177e4SLinus Torvalds /**
3311da177e4SLinus Torvalds  * adjust_jiffies - adjust the system "loops_per_jiffy"
3321da177e4SLinus Torvalds  *
3331da177e4SLinus Torvalds  * This function alters the system "loops_per_jiffy" for the clock
3341da177e4SLinus Torvalds  * speed change. Note that loops_per_jiffy cannot be updated on SMP
3351da177e4SLinus Torvalds  * systems as each CPU might be scaled differently. So, use the arch
3361da177e4SLinus Torvalds  * per-CPU loops_per_jiffy value wherever possible.
3371da177e4SLinus Torvalds  */
33839c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
33939c132eeSViresh Kumar {
3401da177e4SLinus Torvalds #ifndef CONFIG_SMP
3411da177e4SLinus Torvalds 	static unsigned long l_p_j_ref;
3421da177e4SLinus Torvalds 	static unsigned int l_p_j_ref_freq;
3431da177e4SLinus Torvalds 
3441da177e4SLinus Torvalds 	if (ci->flags & CPUFREQ_CONST_LOOPS)
3451da177e4SLinus Torvalds 		return;
3461da177e4SLinus Torvalds 
3471da177e4SLinus Torvalds 	if (!l_p_j_ref_freq) {
3481da177e4SLinus Torvalds 		l_p_j_ref = loops_per_jiffy;
3491da177e4SLinus Torvalds 		l_p_j_ref_freq = ci->old;
350e837f9b5SJoe Perches 		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
351e837f9b5SJoe Perches 			 l_p_j_ref, l_p_j_ref_freq);
3521da177e4SLinus Torvalds 	}
3530b443eadSViresh Kumar 	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
354e08f5f5bSGautham R Shenoy 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
355e08f5f5bSGautham R Shenoy 								ci->new);
356e837f9b5SJoe Perches 		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
357e837f9b5SJoe Perches 			 loops_per_jiffy, ci->new);
3581da177e4SLinus Torvalds 	}
3591da177e4SLinus Torvalds #endif
36039c132eeSViresh Kumar }
3611da177e4SLinus Torvalds 
3620956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
363b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
3641da177e4SLinus Torvalds {
3651da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
3661da177e4SLinus Torvalds 
367d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
368d5aaffa9SDirk Brandewie 		return;
369d5aaffa9SDirk Brandewie 
3701c3d85ddSRafael J. Wysocki 	freqs->flags = cpufreq_driver->flags;
3712d06d8c4SDominik Brodowski 	pr_debug("notification %u of frequency transition to %u kHz\n",
372e4472cb3SDave Jones 		 state, freqs->new);
3731da177e4SLinus Torvalds 
3741da177e4SLinus Torvalds 	switch (state) {
375e4472cb3SDave Jones 
3761da177e4SLinus Torvalds 	case CPUFREQ_PRECHANGE:
377e4472cb3SDave Jones 		/* detect if the driver reported a value as "old frequency"
378e4472cb3SDave Jones 		 * which is not equal to what the cpufreq core thinks is
379e4472cb3SDave Jones 		 * "old frequency".
3801da177e4SLinus Torvalds 		 */
3811c3d85ddSRafael J. Wysocki 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
382e4472cb3SDave Jones 			if ((policy) && (policy->cpu == freqs->cpu) &&
383e4472cb3SDave Jones 			    (policy->cur) && (policy->cur != freqs->old)) {
384e837f9b5SJoe Perches 				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
385e4472cb3SDave Jones 					 freqs->old, policy->cur);
386e4472cb3SDave Jones 				freqs->old = policy->cur;
3871da177e4SLinus Torvalds 			}
3881da177e4SLinus Torvalds 		}
389b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
390e4472cb3SDave Jones 				CPUFREQ_PRECHANGE, freqs);
3911da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
3921da177e4SLinus Torvalds 		break;
393e4472cb3SDave Jones 
3941da177e4SLinus Torvalds 	case CPUFREQ_POSTCHANGE:
3951da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
396e837f9b5SJoe Perches 		pr_debug("FREQ: %lu - CPU: %lu\n",
397e837f9b5SJoe Perches 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
39825e41933SThomas Renninger 		trace_cpu_frequency(freqs->new, freqs->cpu);
399b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
400e4472cb3SDave Jones 				CPUFREQ_POSTCHANGE, freqs);
401e4472cb3SDave Jones 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
402e4472cb3SDave Jones 			policy->cur = freqs->new;
4031da177e4SLinus Torvalds 		break;
4041da177e4SLinus Torvalds 	}
4051da177e4SLinus Torvalds }
406bb176f7dSViresh Kumar 
407b43a7ffbSViresh Kumar /**
408b43a7ffbSViresh Kumar  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
409b43a7ffbSViresh Kumar  * on frequency transition.
410b43a7ffbSViresh Kumar  *
411b43a7ffbSViresh Kumar  * This function calls the transition notifiers and the "adjust_jiffies"
412b43a7ffbSViresh Kumar  * function. It is called twice on all CPU frequency changes that have
413b43a7ffbSViresh Kumar  * external effects.
414b43a7ffbSViresh Kumar  */
415236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy,
416b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
417b43a7ffbSViresh Kumar {
418b43a7ffbSViresh Kumar 	for_each_cpu(freqs->cpu, policy->cpus)
419b43a7ffbSViresh Kumar 		__cpufreq_notify_transition(policy, freqs, state);
420b43a7ffbSViresh Kumar }
4211da177e4SLinus Torvalds 
422f7ba3b41SViresh Kumar /* Do post notifications when there is a chance that the transition has failed */
423236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
424f7ba3b41SViresh Kumar 		struct cpufreq_freqs *freqs, int transition_failed)
425f7ba3b41SViresh Kumar {
426f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
427f7ba3b41SViresh Kumar 	if (!transition_failed)
428f7ba3b41SViresh Kumar 		return;
429f7ba3b41SViresh Kumar 
430f7ba3b41SViresh Kumar 	swap(freqs->old, freqs->new);
431f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
432f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
433f7ba3b41SViresh Kumar }
434f7ba3b41SViresh Kumar 
43512478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
43612478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs)
43712478cf0SSrivatsa S. Bhat {
438ca654dc3SSrivatsa S. Bhat 
439ca654dc3SSrivatsa S. Bhat 	/*
440ca654dc3SSrivatsa S. Bhat 	 * Catch double invocations of _begin() which lead to self-deadlock.
441ca654dc3SSrivatsa S. Bhat 	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
442ca654dc3SSrivatsa S. Bhat 	 * doesn't invoke _begin() on their behalf, and hence the chances of
443ca654dc3SSrivatsa S. Bhat 	 * double invocations are very low. Moreover, there are scenarios
444ca654dc3SSrivatsa S. Bhat 	 * where these checks can emit false-positive warnings in these
445ca654dc3SSrivatsa S. Bhat 	 * drivers; so we avoid that by skipping them altogether.
446ca654dc3SSrivatsa S. Bhat 	 */
447ca654dc3SSrivatsa S. Bhat 	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
448ca654dc3SSrivatsa S. Bhat 				&& current == policy->transition_task);
449ca654dc3SSrivatsa S. Bhat 
45012478cf0SSrivatsa S. Bhat wait:
45112478cf0SSrivatsa S. Bhat 	wait_event(policy->transition_wait, !policy->transition_ongoing);
45212478cf0SSrivatsa S. Bhat 
45312478cf0SSrivatsa S. Bhat 	spin_lock(&policy->transition_lock);
45412478cf0SSrivatsa S. Bhat 
45512478cf0SSrivatsa S. Bhat 	if (unlikely(policy->transition_ongoing)) {
45612478cf0SSrivatsa S. Bhat 		spin_unlock(&policy->transition_lock);
45712478cf0SSrivatsa S. Bhat 		goto wait;
45812478cf0SSrivatsa S. Bhat 	}
45912478cf0SSrivatsa S. Bhat 
46012478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = true;
461ca654dc3SSrivatsa S. Bhat 	policy->transition_task = current;
46212478cf0SSrivatsa S. Bhat 
46312478cf0SSrivatsa S. Bhat 	spin_unlock(&policy->transition_lock);
46412478cf0SSrivatsa S. Bhat 
46512478cf0SSrivatsa S. Bhat 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
46612478cf0SSrivatsa S. Bhat }
46712478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
46812478cf0SSrivatsa S. Bhat 
46912478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
47012478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs, int transition_failed)
47112478cf0SSrivatsa S. Bhat {
47212478cf0SSrivatsa S. Bhat 	if (unlikely(WARN_ON(!policy->transition_ongoing)))
47312478cf0SSrivatsa S. Bhat 		return;
47412478cf0SSrivatsa S. Bhat 
47512478cf0SSrivatsa S. Bhat 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
47612478cf0SSrivatsa S. Bhat 
47712478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = false;
478ca654dc3SSrivatsa S. Bhat 	policy->transition_task = NULL;
47912478cf0SSrivatsa S. Bhat 
48012478cf0SSrivatsa S. Bhat 	wake_up(&policy->transition_wait);
48112478cf0SSrivatsa S. Bhat }
48212478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
48312478cf0SSrivatsa S. Bhat 
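/*
 * Editorial sketch (not part of this file): conceptually, every synchronous
 * frequency change is bracketed by the two helpers above; for most drivers
 * the core issues these calls on their behalf.  The sequence boils down to:
 *
 *	struct cpufreq_freqs freqs;
 *	int ret;
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_freq;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = program_the_hardware(target_freq);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 * program_the_hardware() is a stand-in for the driver's actual frequency
 * switch.  _begin() serializes concurrent transitions and sends the
 * PRECHANGE notification; _end() sends POSTCHANGE (or, on failure, announces
 * a transition back to the old frequency) and wakes up any waiters.
 */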
4841da177e4SLinus Torvalds 
4851da177e4SLinus Torvalds /*********************************************************************
4861da177e4SLinus Torvalds  *                          SYSFS INTERFACE                          *
4871da177e4SLinus Torvalds  *********************************************************************/
4888a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj,
4896f19efc0SLukasz Majewski 				 struct attribute *attr, char *buf)
4906f19efc0SLukasz Majewski {
4916f19efc0SLukasz Majewski 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
4926f19efc0SLukasz Majewski }
4936f19efc0SLukasz Majewski 
4946f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
4956f19efc0SLukasz Majewski 				  const char *buf, size_t count)
4966f19efc0SLukasz Majewski {
4976f19efc0SLukasz Majewski 	int ret, enable;
4986f19efc0SLukasz Majewski 
4996f19efc0SLukasz Majewski 	ret = sscanf(buf, "%d", &enable);
5006f19efc0SLukasz Majewski 	if (ret != 1 || enable < 0 || enable > 1)
5016f19efc0SLukasz Majewski 		return -EINVAL;
5026f19efc0SLukasz Majewski 
5036f19efc0SLukasz Majewski 	if (cpufreq_boost_trigger_state(enable)) {
504e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST!\n",
505e837f9b5SJoe Perches 		       __func__, enable ? "enable" : "disable");
5066f19efc0SLukasz Majewski 		return -EINVAL;
5076f19efc0SLukasz Majewski 	}
5086f19efc0SLukasz Majewski 
509e837f9b5SJoe Perches 	pr_debug("%s: cpufreq BOOST %s\n",
510e837f9b5SJoe Perches 		 __func__, enable ? "enabled" : "disabled");
5116f19efc0SLukasz Majewski 
5126f19efc0SLukasz Majewski 	return count;
5136f19efc0SLukasz Majewski }
5146f19efc0SLukasz Majewski define_one_global_rw(boost);
5151da177e4SLinus Torvalds 
51642f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor)
5173bcb09a3SJeremy Fitzhardinge {
5183bcb09a3SJeremy Fitzhardinge 	struct cpufreq_governor *t;
5193bcb09a3SJeremy Fitzhardinge 
520f7b27061SViresh Kumar 	for_each_governor(t)
5217c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
5223bcb09a3SJeremy Fitzhardinge 			return t;
5233bcb09a3SJeremy Fitzhardinge 
5243bcb09a3SJeremy Fitzhardinge 	return NULL;
5253bcb09a3SJeremy Fitzhardinge }
5263bcb09a3SJeremy Fitzhardinge 
5271da177e4SLinus Torvalds /**
5281da177e4SLinus Torvalds  * cpufreq_parse_governor - parse a governor string
5291da177e4SLinus Torvalds  */
5301da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
5311da177e4SLinus Torvalds 				struct cpufreq_governor **governor)
5321da177e4SLinus Torvalds {
5333bcb09a3SJeremy Fitzhardinge 	int err = -EINVAL;
5343bcb09a3SJeremy Fitzhardinge 
5351c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver)
5363bcb09a3SJeremy Fitzhardinge 		goto out;
5373bcb09a3SJeremy Fitzhardinge 
5381c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
5397c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
5401da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_PERFORMANCE;
5413bcb09a3SJeremy Fitzhardinge 			err = 0;
5427c4f4539SRasmus Villemoes 		} else if (!strncasecmp(str_governor, "powersave",
543e08f5f5bSGautham R Shenoy 						CPUFREQ_NAME_LEN)) {
5441da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_POWERSAVE;
5453bcb09a3SJeremy Fitzhardinge 			err = 0;
5461da177e4SLinus Torvalds 		}
5472e1cc3a5SViresh Kumar 	} else {
5481da177e4SLinus Torvalds 		struct cpufreq_governor *t;
5493bcb09a3SJeremy Fitzhardinge 
5503fc54d37Sakpm@osdl.org 		mutex_lock(&cpufreq_governor_mutex);
5513bcb09a3SJeremy Fitzhardinge 
55242f91fa1SViresh Kumar 		t = find_governor(str_governor);
5533bcb09a3SJeremy Fitzhardinge 
554ea714970SJeremy Fitzhardinge 		if (t == NULL) {
555ea714970SJeremy Fitzhardinge 			int ret;
556ea714970SJeremy Fitzhardinge 
557ea714970SJeremy Fitzhardinge 			mutex_unlock(&cpufreq_governor_mutex);
5581a8e1463SKees Cook 			ret = request_module("cpufreq_%s", str_governor);
559ea714970SJeremy Fitzhardinge 			mutex_lock(&cpufreq_governor_mutex);
560ea714970SJeremy Fitzhardinge 
561ea714970SJeremy Fitzhardinge 			if (ret == 0)
56242f91fa1SViresh Kumar 				t = find_governor(str_governor);
563ea714970SJeremy Fitzhardinge 		}
564ea714970SJeremy Fitzhardinge 
5653bcb09a3SJeremy Fitzhardinge 		if (t != NULL) {
5661da177e4SLinus Torvalds 			*governor = t;
5673bcb09a3SJeremy Fitzhardinge 			err = 0;
5681da177e4SLinus Torvalds 		}
5693bcb09a3SJeremy Fitzhardinge 
5703bcb09a3SJeremy Fitzhardinge 		mutex_unlock(&cpufreq_governor_mutex);
5711da177e4SLinus Torvalds 	}
5721da177e4SLinus Torvalds out:
5733bcb09a3SJeremy Fitzhardinge 	return err;
5741da177e4SLinus Torvalds }
5751da177e4SLinus Torvalds 
5761da177e4SLinus Torvalds /**
577e08f5f5bSGautham R Shenoy  * cpufreq_per_cpu_attr_read() / show_##file_name() -
578e08f5f5bSGautham R Shenoy  * print out cpufreq information
5791da177e4SLinus Torvalds  *
5801da177e4SLinus Torvalds  * Write out information from cpufreq_driver->policy[cpu]; object must be
5811da177e4SLinus Torvalds  * "unsigned int".
5821da177e4SLinus Torvalds  */
5831da177e4SLinus Torvalds 
5841da177e4SLinus Torvalds #define show_one(file_name, object)			\
5851da177e4SLinus Torvalds static ssize_t show_##file_name				\
5861da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf)		\
5871da177e4SLinus Torvalds {							\
5881da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", policy->object);	\
5891da177e4SLinus Torvalds }
5901da177e4SLinus Torvalds 
5911da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq);
5921da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq);
593ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
5941da177e4SLinus Torvalds show_one(scaling_min_freq, min);
5951da177e4SLinus Torvalds show_one(scaling_max_freq, max);
596c034b02eSDirk Brandewie 
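/*
 * Editorial note (not part of this file): show_one(scaling_min_freq, min)
 * above expands to an ordinary sysfs show callback, equivalent to:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */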
59709347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
598c034b02eSDirk Brandewie {
599c034b02eSDirk Brandewie 	ssize_t ret;
600c034b02eSDirk Brandewie 
601c034b02eSDirk Brandewie 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
602c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
603c034b02eSDirk Brandewie 	else
604c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", policy->cur);
605c034b02eSDirk Brandewie 	return ret;
606c034b02eSDirk Brandewie }
6071da177e4SLinus Torvalds 
608037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
6093a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy);
6107970e08bSThomas Renninger 
6111da177e4SLinus Torvalds /**
6121da177e4SLinus Torvalds  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
6131da177e4SLinus Torvalds  */
6141da177e4SLinus Torvalds #define store_one(file_name, object)			\
6151da177e4SLinus Torvalds static ssize_t store_##file_name					\
6161da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count)		\
6171da177e4SLinus Torvalds {									\
618619c144cSVince Hsu 	int ret, temp;							\
6191da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;				\
6201da177e4SLinus Torvalds 									\
6211da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
6221da177e4SLinus Torvalds 	if (ret)							\
6231da177e4SLinus Torvalds 		return -EINVAL;						\
6241da177e4SLinus Torvalds 									\
6251da177e4SLinus Torvalds 	ret = sscanf(buf, "%u", &new_policy.object);			\
6261da177e4SLinus Torvalds 	if (ret != 1)							\
6271da177e4SLinus Torvalds 		return -EINVAL;						\
6281da177e4SLinus Torvalds 									\
629619c144cSVince Hsu 	temp = new_policy.object;					\
630037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);		\
631619c144cSVince Hsu 	if (!ret)							\
632619c144cSVince Hsu 		policy->user_policy.object = temp;			\
6331da177e4SLinus Torvalds 									\
6341da177e4SLinus Torvalds 	return ret ? ret : count;					\
6351da177e4SLinus Torvalds }
6361da177e4SLinus Torvalds 
6371da177e4SLinus Torvalds store_one(scaling_min_freq, min);
6381da177e4SLinus Torvalds store_one(scaling_max_freq, max);
6391da177e4SLinus Torvalds 
6401da177e4SLinus Torvalds /**
6411da177e4SLinus Torvalds  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
6421da177e4SLinus Torvalds  */
643e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
644e08f5f5bSGautham R Shenoy 					char *buf)
6451da177e4SLinus Torvalds {
646d92d50a4SViresh Kumar 	unsigned int cur_freq = __cpufreq_get(policy);
6471da177e4SLinus Torvalds 	if (!cur_freq)
6481da177e4SLinus Torvalds 		return sprintf(buf, "<unknown>");
6491da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", cur_freq);
6501da177e4SLinus Torvalds }
6511da177e4SLinus Torvalds 
6521da177e4SLinus Torvalds /**
6531da177e4SLinus Torvalds  * show_scaling_governor - show the current policy for the specified CPU
6541da177e4SLinus Torvalds  */
655905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
6561da177e4SLinus Torvalds {
6571da177e4SLinus Torvalds 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
6581da177e4SLinus Torvalds 		return sprintf(buf, "powersave\n");
6591da177e4SLinus Torvalds 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
6601da177e4SLinus Torvalds 		return sprintf(buf, "performance\n");
6611da177e4SLinus Torvalds 	else if (policy->governor)
6624b972f0bSviresh kumar 		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
66329464f28SDave Jones 				policy->governor->name);
6641da177e4SLinus Torvalds 	return -EINVAL;
6651da177e4SLinus Torvalds }
6661da177e4SLinus Torvalds 
6671da177e4SLinus Torvalds /**
6681da177e4SLinus Torvalds  * store_scaling_governor - store policy for the specified CPU
6691da177e4SLinus Torvalds  */
6701da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
6711da177e4SLinus Torvalds 					const char *buf, size_t count)
6721da177e4SLinus Torvalds {
6735136fa56SSrivatsa S. Bhat 	int ret;
6741da177e4SLinus Torvalds 	char	str_governor[16];
6751da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;
6761da177e4SLinus Torvalds 
6771da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
6781da177e4SLinus Torvalds 	if (ret)
6791da177e4SLinus Torvalds 		return ret;
6801da177e4SLinus Torvalds 
6811da177e4SLinus Torvalds 	ret = sscanf(buf, "%15s", str_governor);
6821da177e4SLinus Torvalds 	if (ret != 1)
6831da177e4SLinus Torvalds 		return -EINVAL;
6841da177e4SLinus Torvalds 
685e08f5f5bSGautham R Shenoy 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
686e08f5f5bSGautham R Shenoy 						&new_policy.governor))
6871da177e4SLinus Torvalds 		return -EINVAL;
6881da177e4SLinus Torvalds 
689037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
6907970e08bSThomas Renninger 
6917970e08bSThomas Renninger 	policy->user_policy.policy = policy->policy;
6927970e08bSThomas Renninger 	policy->user_policy.governor = policy->governor;
6937970e08bSThomas Renninger 
694e08f5f5bSGautham R Shenoy 	if (ret)
695e08f5f5bSGautham R Shenoy 		return ret;
696e08f5f5bSGautham R Shenoy 	else
697e08f5f5bSGautham R Shenoy 		return count;
6981da177e4SLinus Torvalds }
6991da177e4SLinus Torvalds 
7001da177e4SLinus Torvalds /**
7011da177e4SLinus Torvalds  * show_scaling_driver - show the cpufreq driver currently loaded
7021da177e4SLinus Torvalds  */
7031da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
7041da177e4SLinus Torvalds {
7051c3d85ddSRafael J. Wysocki 	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
7061da177e4SLinus Torvalds }
7071da177e4SLinus Torvalds 
7081da177e4SLinus Torvalds /**
7091da177e4SLinus Torvalds  * show_scaling_available_governors - show the available CPUfreq governors
7101da177e4SLinus Torvalds  */
7111da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
7121da177e4SLinus Torvalds 						char *buf)
7131da177e4SLinus Torvalds {
7141da177e4SLinus Torvalds 	ssize_t i = 0;
7151da177e4SLinus Torvalds 	struct cpufreq_governor *t;
7161da177e4SLinus Torvalds 
7179c0ebcf7SViresh Kumar 	if (!has_target()) {
7181da177e4SLinus Torvalds 		i += sprintf(buf, "performance powersave");
7191da177e4SLinus Torvalds 		goto out;
7201da177e4SLinus Torvalds 	}
7211da177e4SLinus Torvalds 
722f7b27061SViresh Kumar 	for_each_governor(t) {
72329464f28SDave Jones 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
72429464f28SDave Jones 		    - (CPUFREQ_NAME_LEN + 2)))
7251da177e4SLinus Torvalds 			goto out;
7264b972f0bSviresh kumar 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
7271da177e4SLinus Torvalds 	}
7281da177e4SLinus Torvalds out:
7291da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7301da177e4SLinus Torvalds 	return i;
7311da177e4SLinus Torvalds }
732e8628dd0SDarrick J. Wong 
733f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
7341da177e4SLinus Torvalds {
7351da177e4SLinus Torvalds 	ssize_t i = 0;
7361da177e4SLinus Torvalds 	unsigned int cpu;
7371da177e4SLinus Torvalds 
738835481d9SRusty Russell 	for_each_cpu(cpu, mask) {
7391da177e4SLinus Torvalds 		if (i)
7401da177e4SLinus Torvalds 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
7411da177e4SLinus Torvalds 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
7421da177e4SLinus Torvalds 		if (i >= (PAGE_SIZE - 5))
7431da177e4SLinus Torvalds 			break;
7441da177e4SLinus Torvalds 	}
7451da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7461da177e4SLinus Torvalds 	return i;
7471da177e4SLinus Torvalds }
748f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
7491da177e4SLinus Torvalds 
750e8628dd0SDarrick J. Wong /**
751e8628dd0SDarrick J. Wong  * show_related_cpus - show the CPUs affected by each transition even if
752e8628dd0SDarrick J. Wong  * hw coordination is in use
753e8628dd0SDarrick J. Wong  */
754e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
755e8628dd0SDarrick J. Wong {
756f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->related_cpus, buf);
757e8628dd0SDarrick J. Wong }
758e8628dd0SDarrick J. Wong 
759e8628dd0SDarrick J. Wong /**
760e8628dd0SDarrick J. Wong  * show_affected_cpus - show the CPUs affected by each transition
761e8628dd0SDarrick J. Wong  */
762e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
763e8628dd0SDarrick J. Wong {
764f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->cpus, buf);
765e8628dd0SDarrick J. Wong }
766e8628dd0SDarrick J. Wong 
7679e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
7689e76988eSVenki Pallipadi 					const char *buf, size_t count)
7699e76988eSVenki Pallipadi {
7709e76988eSVenki Pallipadi 	unsigned int freq = 0;
7719e76988eSVenki Pallipadi 	unsigned int ret;
7729e76988eSVenki Pallipadi 
773879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->store_setspeed)
7749e76988eSVenki Pallipadi 		return -EINVAL;
7759e76988eSVenki Pallipadi 
7769e76988eSVenki Pallipadi 	ret = sscanf(buf, "%u", &freq);
7779e76988eSVenki Pallipadi 	if (ret != 1)
7789e76988eSVenki Pallipadi 		return -EINVAL;
7799e76988eSVenki Pallipadi 
7809e76988eSVenki Pallipadi 	policy->governor->store_setspeed(policy, freq);
7819e76988eSVenki Pallipadi 
7829e76988eSVenki Pallipadi 	return count;
7839e76988eSVenki Pallipadi }
7849e76988eSVenki Pallipadi 
7859e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
7869e76988eSVenki Pallipadi {
787879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->show_setspeed)
7889e76988eSVenki Pallipadi 		return sprintf(buf, "<unsupported>\n");
7899e76988eSVenki Pallipadi 
7909e76988eSVenki Pallipadi 	return policy->governor->show_setspeed(policy, buf);
7919e76988eSVenki Pallipadi }
7921da177e4SLinus Torvalds 
793e2f74f35SThomas Renninger /**
7948bf1ac72Sviresh kumar  * show_bios_limit - show the current cpufreq HW/BIOS limitation
795e2f74f35SThomas Renninger  */
796e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
797e2f74f35SThomas Renninger {
798e2f74f35SThomas Renninger 	unsigned int limit;
799e2f74f35SThomas Renninger 	int ret;
8001c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
8011c3d85ddSRafael J. Wysocki 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
802e2f74f35SThomas Renninger 		if (!ret)
803e2f74f35SThomas Renninger 			return sprintf(buf, "%u\n", limit);
804e2f74f35SThomas Renninger 	}
805e2f74f35SThomas Renninger 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
806e2f74f35SThomas Renninger }
807e2f74f35SThomas Renninger 
8086dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
8096dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq);
8106dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq);
8116dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency);
8126dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors);
8136dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver);
8146dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq);
8156dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit);
8166dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus);
8176dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus);
8186dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq);
8196dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq);
8206dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor);
8216dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed);
8221da177e4SLinus Torvalds 
8231da177e4SLinus Torvalds static struct attribute *default_attrs[] = {
8241da177e4SLinus Torvalds 	&cpuinfo_min_freq.attr,
8251da177e4SLinus Torvalds 	&cpuinfo_max_freq.attr,
826ed129784SThomas Renninger 	&cpuinfo_transition_latency.attr,
8271da177e4SLinus Torvalds 	&scaling_min_freq.attr,
8281da177e4SLinus Torvalds 	&scaling_max_freq.attr,
8291da177e4SLinus Torvalds 	&affected_cpus.attr,
830e8628dd0SDarrick J. Wong 	&related_cpus.attr,
8311da177e4SLinus Torvalds 	&scaling_governor.attr,
8321da177e4SLinus Torvalds 	&scaling_driver.attr,
8331da177e4SLinus Torvalds 	&scaling_available_governors.attr,
8349e76988eSVenki Pallipadi 	&scaling_setspeed.attr,
8351da177e4SLinus Torvalds 	NULL
8361da177e4SLinus Torvalds };
8371da177e4SLinus Torvalds 
8381da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
8391da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr)
8401da177e4SLinus Torvalds 
8411da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
8421da177e4SLinus Torvalds {
8431da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8441da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
8451b750e3bSViresh Kumar 	ssize_t ret;
8466eed9404SViresh Kumar 
8476eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
8481b750e3bSViresh Kumar 		return -EINVAL;
8495a01f2e8SVenkatesh Pallipadi 
850ad7722daSviresh kumar 	down_read(&policy->rwsem);
8515a01f2e8SVenkatesh Pallipadi 
852e08f5f5bSGautham R Shenoy 	if (fattr->show)
853e08f5f5bSGautham R Shenoy 		ret = fattr->show(policy, buf);
854e08f5f5bSGautham R Shenoy 	else
855e08f5f5bSGautham R Shenoy 		ret = -EIO;
856e08f5f5bSGautham R Shenoy 
857ad7722daSviresh kumar 	up_read(&policy->rwsem);
8586eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
8591b750e3bSViresh Kumar 
8601da177e4SLinus Torvalds 	return ret;
8611da177e4SLinus Torvalds }
8621da177e4SLinus Torvalds 
8631da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr,
8641da177e4SLinus Torvalds 		     const char *buf, size_t count)
8651da177e4SLinus Torvalds {
8661da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8671da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
868a07530b4SDave Jones 	ssize_t ret = -EINVAL;
8696eed9404SViresh Kumar 
8704f750c93SSrivatsa S. Bhat 	get_online_cpus();
8714f750c93SSrivatsa S. Bhat 
8724f750c93SSrivatsa S. Bhat 	if (!cpu_online(policy->cpu))
8734f750c93SSrivatsa S. Bhat 		goto unlock;
8744f750c93SSrivatsa S. Bhat 
8756eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
8764f750c93SSrivatsa S. Bhat 		goto unlock;
8775a01f2e8SVenkatesh Pallipadi 
878ad7722daSviresh kumar 	down_write(&policy->rwsem);
8795a01f2e8SVenkatesh Pallipadi 
880e08f5f5bSGautham R Shenoy 	if (fattr->store)
881e08f5f5bSGautham R Shenoy 		ret = fattr->store(policy, buf, count);
882e08f5f5bSGautham R Shenoy 	else
883e08f5f5bSGautham R Shenoy 		ret = -EIO;
884e08f5f5bSGautham R Shenoy 
885ad7722daSviresh kumar 	up_write(&policy->rwsem);
8866eed9404SViresh Kumar 
8876eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
8884f750c93SSrivatsa S. Bhat unlock:
8894f750c93SSrivatsa S. Bhat 	put_online_cpus();
8904f750c93SSrivatsa S. Bhat 
8911da177e4SLinus Torvalds 	return ret;
8921da177e4SLinus Torvalds }
8931da177e4SLinus Torvalds 
8941da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj)
8951da177e4SLinus Torvalds {
8961da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8972d06d8c4SDominik Brodowski 	pr_debug("last reference is dropped\n");
8981da177e4SLinus Torvalds 	complete(&policy->kobj_unregister);
8991da177e4SLinus Torvalds }
9001da177e4SLinus Torvalds 
90152cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = {
9021da177e4SLinus Torvalds 	.show	= show,
9031da177e4SLinus Torvalds 	.store	= store,
9041da177e4SLinus Torvalds };
9051da177e4SLinus Torvalds 
9061da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = {
9071da177e4SLinus Torvalds 	.sysfs_ops	= &sysfs_ops,
9081da177e4SLinus Torvalds 	.default_attrs	= default_attrs,
9091da177e4SLinus Torvalds 	.release	= cpufreq_sysfs_release,
9101da177e4SLinus Torvalds };
9111da177e4SLinus Torvalds 
9122361be23SViresh Kumar struct kobject *cpufreq_global_kobject;
9132361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject);
9142361be23SViresh Kumar 
9152361be23SViresh Kumar static int cpufreq_global_kobject_usage;
9162361be23SViresh Kumar 
9172361be23SViresh Kumar int cpufreq_get_global_kobject(void)
9182361be23SViresh Kumar {
9192361be23SViresh Kumar 	if (!cpufreq_global_kobject_usage++)
9202361be23SViresh Kumar 		return kobject_add(cpufreq_global_kobject,
9212361be23SViresh Kumar 				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
9222361be23SViresh Kumar 
9232361be23SViresh Kumar 	return 0;
9242361be23SViresh Kumar }
9252361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject);
9262361be23SViresh Kumar 
9272361be23SViresh Kumar void cpufreq_put_global_kobject(void)
9282361be23SViresh Kumar {
9292361be23SViresh Kumar 	if (!--cpufreq_global_kobject_usage)
9302361be23SViresh Kumar 		kobject_del(cpufreq_global_kobject);
9312361be23SViresh Kumar }
9322361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject);
9332361be23SViresh Kumar 
9342361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr)
9352361be23SViresh Kumar {
9362361be23SViresh Kumar 	int ret = cpufreq_get_global_kobject();
9372361be23SViresh Kumar 
9382361be23SViresh Kumar 	if (!ret) {
9392361be23SViresh Kumar 		ret = sysfs_create_file(cpufreq_global_kobject, attr);
9402361be23SViresh Kumar 		if (ret)
9412361be23SViresh Kumar 			cpufreq_put_global_kobject();
9422361be23SViresh Kumar 	}
9432361be23SViresh Kumar 
9442361be23SViresh Kumar 	return ret;
9452361be23SViresh Kumar }
9462361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file);
9472361be23SViresh Kumar 
9482361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr)
9492361be23SViresh Kumar {
9502361be23SViresh Kumar 	sysfs_remove_file(cpufreq_global_kobject, attr);
9512361be23SViresh Kumar 	cpufreq_put_global_kobject();
9522361be23SViresh Kumar }
9532361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
9542361be23SViresh Kumar 
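/*
 * Editorial sketch (not part of this file): code that wants a file directly
 * under /sys/devices/system/cpu/cpufreq/ uses the pair above; the "boost"
 * attribute defined earlier is handled this way when a boost-capable driver
 * registers:
 *
 *	ret = cpufreq_sysfs_create_file(&boost.attr);
 *	...
 *	cpufreq_sysfs_remove_file(&boost.attr);
 *
 * Both helpers take and drop a reference on cpufreq_global_kobject
 * internally, so callers don't manage it themselves.
 */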
95519d6f7ecSDave Jones /* symlink affected CPUs */
956308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
95719d6f7ecSDave Jones {
95819d6f7ecSDave Jones 	unsigned int j;
95919d6f7ecSDave Jones 	int ret = 0;
96019d6f7ecSDave Jones 
96119d6f7ecSDave Jones 	for_each_cpu(j, policy->cpus) {
9628a25a2fdSKay Sievers 		struct device *cpu_dev;
96319d6f7ecSDave Jones 
964308b60e7SViresh Kumar 		if (j == policy->cpu)
96519d6f7ecSDave Jones 			continue;
96619d6f7ecSDave Jones 
967e8fdde10SViresh Kumar 		pr_debug("Adding link for CPU: %u\n", j);
9688a25a2fdSKay Sievers 		cpu_dev = get_cpu_device(j);
9698a25a2fdSKay Sievers 		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
97019d6f7ecSDave Jones 					"cpufreq");
97171c3461eSRafael J. Wysocki 		if (ret)
97271c3461eSRafael J. Wysocki 			break;
97319d6f7ecSDave Jones 	}
97419d6f7ecSDave Jones 	return ret;
97519d6f7ecSDave Jones }
97619d6f7ecSDave Jones 
977308b60e7SViresh Kumar static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
9788a25a2fdSKay Sievers 				     struct device *dev)
979909a694eSDave Jones {
980909a694eSDave Jones 	struct freq_attr **drv_attr;
981909a694eSDave Jones 	int ret = 0;
982909a694eSDave Jones 
983909a694eSDave Jones 	/* set up files for this cpu device */
9841c3d85ddSRafael J. Wysocki 	drv_attr = cpufreq_driver->attr;
985f13f1184SViresh Kumar 	while (drv_attr && *drv_attr) {
986909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
987909a694eSDave Jones 		if (ret)
9886d4e81edSTomeu Vizoso 			return ret;
989909a694eSDave Jones 		drv_attr++;
990909a694eSDave Jones 	}
9911c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->get) {
992909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
993909a694eSDave Jones 		if (ret)
9946d4e81edSTomeu Vizoso 			return ret;
995909a694eSDave Jones 	}
996c034b02eSDirk Brandewie 
997909a694eSDave Jones 	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
998909a694eSDave Jones 	if (ret)
9996d4e81edSTomeu Vizoso 		return ret;
1000c034b02eSDirk Brandewie 
10011c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
1002e2f74f35SThomas Renninger 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1003e2f74f35SThomas Renninger 		if (ret)
10046d4e81edSTomeu Vizoso 			return ret;
1005e2f74f35SThomas Renninger 	}
1006909a694eSDave Jones 
10076d4e81edSTomeu Vizoso 	return cpufreq_add_dev_symlink(policy);
1008e18f1682SSrivatsa S. Bhat }
1009e18f1682SSrivatsa S. Bhat 
1010e18f1682SSrivatsa S. Bhat static void cpufreq_init_policy(struct cpufreq_policy *policy)
1011e18f1682SSrivatsa S. Bhat {
10126e2c89d1Sviresh kumar 	struct cpufreq_governor *gov = NULL;
1013e18f1682SSrivatsa S. Bhat 	struct cpufreq_policy new_policy;
1014e18f1682SSrivatsa S. Bhat 	int ret = 0;
1015e18f1682SSrivatsa S. Bhat 
1016d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
1017a27a9ab7SJason Baron 
10186e2c89d1Sviresh kumar 	/* Update governor of new_policy to the governor used before hotplug */
101942f91fa1SViresh Kumar 	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
10206e2c89d1Sviresh kumar 	if (gov)
10216e2c89d1Sviresh kumar 		pr_debug("Restoring governor %s for cpu %d\n",
10226e2c89d1Sviresh kumar 				policy->governor->name, policy->cpu);
10236e2c89d1Sviresh kumar 	else
10246e2c89d1Sviresh kumar 		gov = CPUFREQ_DEFAULT_GOVERNOR;
10256e2c89d1Sviresh kumar 
10266e2c89d1Sviresh kumar 	new_policy.governor = gov;
10276e2c89d1Sviresh kumar 
1028a27a9ab7SJason Baron 	/* Use the default policy if it's valid. */
1029a27a9ab7SJason Baron 	if (cpufreq_driver->setpolicy)
10306e2c89d1Sviresh kumar 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1031ecf7e461SDave Jones 
1032ecf7e461SDave Jones 	/* set default policy */
1033037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
1034ecf7e461SDave Jones 	if (ret) {
10352d06d8c4SDominik Brodowski 		pr_debug("setting policy failed\n");
10361c3d85ddSRafael J. Wysocki 		if (cpufreq_driver->exit)
10371c3d85ddSRafael J. Wysocki 			cpufreq_driver->exit(policy);
1038ecf7e461SDave Jones 	}
1039909a694eSDave Jones }
1040909a694eSDave Jones 
1041d8d3b471SViresh Kumar static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
104242f921a6SViresh Kumar 				  unsigned int cpu, struct device *dev)
1043fcf80582SViresh Kumar {
10449c0ebcf7SViresh Kumar 	int ret = 0;
1045fcf80582SViresh Kumar 	unsigned long flags;
1046fcf80582SViresh Kumar 
1047bb29ae15SViresh Kumar 	/* Has this CPU been taken care of already? */
1048bb29ae15SViresh Kumar 	if (cpumask_test_cpu(cpu, policy->cpus))
1049bb29ae15SViresh Kumar 		return 0;
1050bb29ae15SViresh Kumar 
10519c0ebcf7SViresh Kumar 	if (has_target()) {
10523de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
10533de9bdebSViresh Kumar 		if (ret) {
10543de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
10553de9bdebSViresh Kumar 			return ret;
10563de9bdebSViresh Kumar 		}
10573de9bdebSViresh Kumar 	}
1058fcf80582SViresh Kumar 
1059ad7722daSviresh kumar 	down_write(&policy->rwsem);
10602eaa3e2dSViresh Kumar 
10610d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
10622eaa3e2dSViresh Kumar 
1063fcf80582SViresh Kumar 	cpumask_set_cpu(cpu, policy->cpus);
1064fcf80582SViresh Kumar 	per_cpu(cpufreq_cpu_data, cpu) = policy;
10650d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1066fcf80582SViresh Kumar 
1067ad7722daSviresh kumar 	up_write(&policy->rwsem);
10682eaa3e2dSViresh Kumar 
10699c0ebcf7SViresh Kumar 	if (has_target()) {
1070e5c87b76SStratos Karafotis 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1071e5c87b76SStratos Karafotis 		if (!ret)
1072e5c87b76SStratos Karafotis 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1073e5c87b76SStratos Karafotis 
1074e5c87b76SStratos Karafotis 		if (ret) {
10753de9bdebSViresh Kumar 			pr_err("%s: Failed to start governor\n", __func__);
10763de9bdebSViresh Kumar 			return ret;
10773de9bdebSViresh Kumar 		}
1078820c6ca2SViresh Kumar 	}
1079fcf80582SViresh Kumar 
108042f921a6SViresh Kumar 	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
1081fcf80582SViresh Kumar }
10821da177e4SLinus Torvalds 
10838414809cSSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
10848414809cSSrivatsa S. Bhat {
10858414809cSSrivatsa S. Bhat 	struct cpufreq_policy *policy;
10868414809cSSrivatsa S. Bhat 	unsigned long flags;
10878414809cSSrivatsa S. Bhat 
108844871c9cSLan Tianyu 	read_lock_irqsave(&cpufreq_driver_lock, flags);
10898414809cSSrivatsa S. Bhat 
10908414809cSSrivatsa S. Bhat 	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
10918414809cSSrivatsa S. Bhat 
109244871c9cSLan Tianyu 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
10938414809cSSrivatsa S. Bhat 
109409712f55SGeert Uytterhoeven 	if (policy)
10956e2c89d1Sviresh kumar 		policy->governor = NULL;
10966e2c89d1Sviresh kumar 
10978414809cSSrivatsa S. Bhat 	return policy;
10988414809cSSrivatsa S. Bhat }
10998414809cSSrivatsa S. Bhat 
1100e9698cc5SSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_alloc(void)
1101e9698cc5SSrivatsa S. Bhat {
1102e9698cc5SSrivatsa S. Bhat 	struct cpufreq_policy *policy;
1103e9698cc5SSrivatsa S. Bhat 
1104e9698cc5SSrivatsa S. Bhat 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1105e9698cc5SSrivatsa S. Bhat 	if (!policy)
1106e9698cc5SSrivatsa S. Bhat 		return NULL;
1107e9698cc5SSrivatsa S. Bhat 
1108e9698cc5SSrivatsa S. Bhat 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1109e9698cc5SSrivatsa S. Bhat 		goto err_free_policy;
1110e9698cc5SSrivatsa S. Bhat 
1111e9698cc5SSrivatsa S. Bhat 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1112e9698cc5SSrivatsa S. Bhat 		goto err_free_cpumask;
1113e9698cc5SSrivatsa S. Bhat 
1114c88a1f8bSLukasz Majewski 	INIT_LIST_HEAD(&policy->policy_list);
1115ad7722daSviresh kumar 	init_rwsem(&policy->rwsem);
111612478cf0SSrivatsa S. Bhat 	spin_lock_init(&policy->transition_lock);
111712478cf0SSrivatsa S. Bhat 	init_waitqueue_head(&policy->transition_wait);
1118818c5712SViresh Kumar 	init_completion(&policy->kobj_unregister);
1119818c5712SViresh Kumar 	INIT_WORK(&policy->update, handle_update);
1120ad7722daSviresh kumar 
1121e9698cc5SSrivatsa S. Bhat 	return policy;
1122e9698cc5SSrivatsa S. Bhat 
1123e9698cc5SSrivatsa S. Bhat err_free_cpumask:
1124e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1125e9698cc5SSrivatsa S. Bhat err_free_policy:
1126e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1127e9698cc5SSrivatsa S. Bhat 
1128e9698cc5SSrivatsa S. Bhat 	return NULL;
1129e9698cc5SSrivatsa S. Bhat }
1130e9698cc5SSrivatsa S. Bhat 
113142f921a6SViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
113242f921a6SViresh Kumar {
113342f921a6SViresh Kumar 	struct kobject *kobj;
113442f921a6SViresh Kumar 	struct completion *cmp;
113542f921a6SViresh Kumar 
1136fcd7af91SViresh Kumar 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1137fcd7af91SViresh Kumar 			CPUFREQ_REMOVE_POLICY, policy);
1138fcd7af91SViresh Kumar 
113942f921a6SViresh Kumar 	down_read(&policy->rwsem);
114042f921a6SViresh Kumar 	kobj = &policy->kobj;
114142f921a6SViresh Kumar 	cmp = &policy->kobj_unregister;
114242f921a6SViresh Kumar 	up_read(&policy->rwsem);
114342f921a6SViresh Kumar 	kobject_put(kobj);
114442f921a6SViresh Kumar 
114542f921a6SViresh Kumar 	/*
114642f921a6SViresh Kumar 	 * We need to make sure that the underlying kobj is
114742f921a6SViresh Kumar 	 * actually not referenced anymore by anybody before we
114842f921a6SViresh Kumar 	 * proceed with unloading.
114942f921a6SViresh Kumar 	 */
115042f921a6SViresh Kumar 	pr_debug("waiting for dropping of refcount\n");
115142f921a6SViresh Kumar 	wait_for_completion(cmp);
115242f921a6SViresh Kumar 	pr_debug("wait complete\n");
115342f921a6SViresh Kumar }
115442f921a6SViresh Kumar 
1155e9698cc5SSrivatsa S. Bhat static void cpufreq_policy_free(struct cpufreq_policy *policy)
1156e9698cc5SSrivatsa S. Bhat {
1157e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->related_cpus);
1158e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1159e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1160e9698cc5SSrivatsa S. Bhat }
1161e9698cc5SSrivatsa S. Bhat 
11621bfb425bSViresh Kumar static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
11631bfb425bSViresh Kumar 			     struct device *cpu_dev)
11640d66b91eSSrivatsa S. Bhat {
11651bfb425bSViresh Kumar 	int ret;
11661bfb425bSViresh Kumar 
116799ec899eSSrivatsa S. Bhat 	if (WARN_ON(cpu == policy->cpu))
11681bfb425bSViresh Kumar 		return 0;
11691bfb425bSViresh Kumar 
11701bfb425bSViresh Kumar 	/* Move kobject to the new policy->cpu */
11711bfb425bSViresh Kumar 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
11721bfb425bSViresh Kumar 	if (ret) {
11731bfb425bSViresh Kumar 		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
11741bfb425bSViresh Kumar 		return ret;
11751bfb425bSViresh Kumar 	}
1176cb38ed5cSSrivatsa S. Bhat 
1177ad7722daSviresh kumar 	down_write(&policy->rwsem);
11780d66b91eSSrivatsa S. Bhat 	policy->cpu = cpu;
1179ad7722daSviresh kumar 	up_write(&policy->rwsem);
11808efd5765SViresh Kumar 
11811bfb425bSViresh Kumar 	return 0;
11820d66b91eSSrivatsa S. Bhat }
11830d66b91eSSrivatsa S. Bhat 
118423faf0b7SViresh Kumar /**
118523faf0b7SViresh Kumar  * cpufreq_add_dev - add a CPU device
118623faf0b7SViresh Kumar  *
118723faf0b7SViresh Kumar  * Adds the cpufreq interface for a CPU device.
118823faf0b7SViresh Kumar  *
118923faf0b7SViresh Kumar  * The Oracle says: try running cpufreq registration/unregistration concurrently
119023faf0b7SViresh Kumar  * with cpu hotplugging and all hell will break loose. Tried to clean this
119123faf0b7SViresh Kumar  * mess up, but more thorough testing is needed. - Mathieu
119223faf0b7SViresh Kumar  */
119323faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
11941da177e4SLinus Torvalds {
1195fcf80582SViresh Kumar 	unsigned int j, cpu = dev->id;
119665922465SViresh Kumar 	int ret = -ENOMEM;
11977f0c020aSViresh Kumar 	struct cpufreq_policy *policy;
11981da177e4SLinus Torvalds 	unsigned long flags;
119996bbbe4aSViresh Kumar 	bool recover_policy = cpufreq_suspended;
12001da177e4SLinus Torvalds 
1201c32b6b8eSAshok Raj 	if (cpu_is_offline(cpu))
1202c32b6b8eSAshok Raj 		return 0;
1203c32b6b8eSAshok Raj 
12042d06d8c4SDominik Brodowski 	pr_debug("adding CPU %u\n", cpu);
12051da177e4SLinus Torvalds 
12066eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
12076eed9404SViresh Kumar 		return 0;
12086eed9404SViresh Kumar 
1209bb29ae15SViresh Kumar 	/* Check if this CPU already has a policy to manage it */
12100d1857a1SNathan Zimmer 	read_lock_irqsave(&cpufreq_driver_lock, flags);
1211*f963735aSViresh Kumar 	for_each_active_policy(policy) {
12127f0c020aSViresh Kumar 		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
12130d1857a1SNathan Zimmer 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
12147f0c020aSViresh Kumar 			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
12156eed9404SViresh Kumar 			up_read(&cpufreq_rwsem);
12166eed9404SViresh Kumar 			return ret;
1217fcf80582SViresh Kumar 		}
12182eaa3e2dSViresh Kumar 	}
12190d1857a1SNathan Zimmer 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
12201da177e4SLinus Torvalds 
122172368d12SRafael J. Wysocki 	/*
122272368d12SRafael J. Wysocki 	 * Restore the saved policy when doing light-weight init and fall back
122372368d12SRafael J. Wysocki 	 * to the full init if that fails.
122472368d12SRafael J. Wysocki 	 */
122596bbbe4aSViresh Kumar 	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
122672368d12SRafael J. Wysocki 	if (!policy) {
122796bbbe4aSViresh Kumar 		recover_policy = false;
1228e9698cc5SSrivatsa S. Bhat 		policy = cpufreq_policy_alloc();
1229059019a3SDave Jones 		if (!policy)
12301da177e4SLinus Torvalds 			goto nomem_out;
123172368d12SRafael J. Wysocki 	}
12320d66b91eSSrivatsa S. Bhat 
12330d66b91eSSrivatsa S. Bhat 	/*
12340d66b91eSSrivatsa S. Bhat 	 * In the resume path, since we restore a saved policy, the assignment
12350d66b91eSSrivatsa S. Bhat 	 * to policy->cpu is like an update of the existing policy, rather than
12360d66b91eSSrivatsa S. Bhat 	 * the creation of a brand new one. So we need to perform this update
12370d66b91eSSrivatsa S. Bhat 	 * by invoking update_policy_cpu().
12380d66b91eSSrivatsa S. Bhat 	 */
12391bfb425bSViresh Kumar 	if (recover_policy && cpu != policy->cpu)
12401bfb425bSViresh Kumar 		WARN_ON(update_policy_cpu(policy, cpu, dev));
12411bfb425bSViresh Kumar 	else
12421da177e4SLinus Torvalds 		policy->cpu = cpu;
12430d66b91eSSrivatsa S. Bhat 
1244835481d9SRusty Russell 	cpumask_copy(policy->cpus, cpumask_of(cpu));
12451da177e4SLinus Torvalds 
12461da177e4SLinus Torvalds 	/* Call the driver. From then on the cpufreq driver must be able
12471da177e4SLinus Torvalds 	 * to accept all calls to ->verify and ->setpolicy for this CPU.
12481da177e4SLinus Torvalds 	 */
12491c3d85ddSRafael J. Wysocki 	ret = cpufreq_driver->init(policy);
12501da177e4SLinus Torvalds 	if (ret) {
12512d06d8c4SDominik Brodowski 		pr_debug("initialization failed\n");
12522eaa3e2dSViresh Kumar 		goto err_set_policy_cpu;
12531da177e4SLinus Torvalds 	}
1254643ae6e8SViresh Kumar 
12556d4e81edSTomeu Vizoso 	down_write(&policy->rwsem);
12566d4e81edSTomeu Vizoso 
12575a7e56a5SViresh Kumar 	/* related cpus should at least have policy->cpus */
12585a7e56a5SViresh Kumar 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
12595a7e56a5SViresh Kumar 
12605a7e56a5SViresh Kumar 	/*
12615a7e56a5SViresh Kumar 	 * affected cpus must always be the ones that are online. We aren't
12625a7e56a5SViresh Kumar 	 * managing offline cpus here.
12635a7e56a5SViresh Kumar 	 */
12645a7e56a5SViresh Kumar 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
12655a7e56a5SViresh Kumar 
126696bbbe4aSViresh Kumar 	if (!recover_policy) {
12675a7e56a5SViresh Kumar 		policy->user_policy.min = policy->min;
12685a7e56a5SViresh Kumar 		policy->user_policy.max = policy->max;
12696d4e81edSTomeu Vizoso 
12706d4e81edSTomeu Vizoso 		/* prepare interface data */
12716d4e81edSTomeu Vizoso 		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
12726d4e81edSTomeu Vizoso 					   &dev->kobj, "cpufreq");
12736d4e81edSTomeu Vizoso 		if (ret) {
12746d4e81edSTomeu Vizoso 			pr_err("%s: failed to init policy->kobj: %d\n",
12756d4e81edSTomeu Vizoso 			       __func__, ret);
12766d4e81edSTomeu Vizoso 			goto err_init_policy_kobj;
12776d4e81edSTomeu Vizoso 		}
12785a7e56a5SViresh Kumar 	}
12795a7e56a5SViresh Kumar 
1280652ed95dSViresh Kumar 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1281652ed95dSViresh Kumar 	for_each_cpu(j, policy->cpus)
1282652ed95dSViresh Kumar 		per_cpu(cpufreq_cpu_data, j) = policy;
1283652ed95dSViresh Kumar 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1284652ed95dSViresh Kumar 
12852ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1286da60ce9fSViresh Kumar 		policy->cur = cpufreq_driver->get(policy->cpu);
1287da60ce9fSViresh Kumar 		if (!policy->cur) {
1288da60ce9fSViresh Kumar 			pr_err("%s: ->get() failed\n", __func__);
1289da60ce9fSViresh Kumar 			goto err_get_freq;
1290da60ce9fSViresh Kumar 		}
1291da60ce9fSViresh Kumar 	}
1292da60ce9fSViresh Kumar 
1293d3916691SViresh Kumar 	/*
1294d3916691SViresh Kumar 	 * Sometimes boot loaders set the CPU frequency to a value outside of
1295d3916691SViresh Kumar 	 * the frequency table present with the cpufreq core. In such cases
1296d3916691SViresh Kumar 	 * the CPU might be unstable if it has to run at that frequency for a
1297d3916691SViresh Kumar 	 * long duration, so it's better to set it to a frequency which is
1298d3916691SViresh Kumar 	 * specified in the freq-table. This also makes cpufreq stats
1299d3916691SViresh Kumar 	 * inconsistent, as cpufreq-stats would fail to register because the
1300d3916691SViresh Kumar 	 * current frequency of the CPU isn't found in the freq-table.
1301d3916691SViresh Kumar 	 *
1302d3916691SViresh Kumar 	 * Because we don't want this change to affect the boot process badly, we go
1303d3916691SViresh Kumar 	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1304d3916691SViresh Kumar 	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1305d3916691SViresh Kumar 	 * is initialized to zero).
1306d3916691SViresh Kumar 	 *
1307d3916691SViresh Kumar 	 * We are passing target-freq as "policy->cur - 1" because otherwise
1308d3916691SViresh Kumar 	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1309d3916691SViresh Kumar 	 * equal to target-freq.
1310d3916691SViresh Kumar 	 */
1311d3916691SViresh Kumar 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1312d3916691SViresh Kumar 	    && has_target()) {
1313d3916691SViresh Kumar 		/* Are we running at unknown frequency ? */
1314d3916691SViresh Kumar 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1315d3916691SViresh Kumar 		if (ret == -EINVAL) {
1316d3916691SViresh Kumar 			/* Warn user and fix it */
1317d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1318d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1319d3916691SViresh Kumar 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1320d3916691SViresh Kumar 				CPUFREQ_RELATION_L);
1321d3916691SViresh Kumar 
1322d3916691SViresh Kumar 			/*
1323d3916691SViresh Kumar 			 * Reaching here after boot in a few seconds may not
1324d3916691SViresh Kumar 			 * mean that system will remain stable at "unknown"
1325d3916691SViresh Kumar 			 * frequency for longer duration. Hence, a BUG_ON().
1326d3916691SViresh Kumar 			 */
1327d3916691SViresh Kumar 			BUG_ON(ret);
1328d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1329d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1330d3916691SViresh Kumar 		}
1331d3916691SViresh Kumar 	}
1332d3916691SViresh Kumar 
1333a1531acdSThomas Renninger 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1334a1531acdSThomas Renninger 				     CPUFREQ_START, policy);
1335a1531acdSThomas Renninger 
133696bbbe4aSViresh Kumar 	if (!recover_policy) {
1337308b60e7SViresh Kumar 		ret = cpufreq_add_dev_interface(policy, dev);
133819d6f7ecSDave Jones 		if (ret)
13390142f9dcSAhmed S. Darwish 			goto err_out_unregister;
1340fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1341fcd7af91SViresh Kumar 				CPUFREQ_CREATE_POLICY, policy);
13429515f4d6SViresh Kumar 	}
1343c88a1f8bSLukasz Majewski 
1344c88a1f8bSLukasz Majewski 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1345c88a1f8bSLukasz Majewski 	list_add(&policy->policy_list, &cpufreq_policy_list);
1346c88a1f8bSLukasz Majewski 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
13478ff69732SDave Jones 
1348e18f1682SSrivatsa S. Bhat 	cpufreq_init_policy(policy);
1349e18f1682SSrivatsa S. Bhat 
135096bbbe4aSViresh Kumar 	if (!recover_policy) {
135108fd8c1cSViresh Kumar 		policy->user_policy.policy = policy->policy;
135208fd8c1cSViresh Kumar 		policy->user_policy.governor = policy->governor;
135308fd8c1cSViresh Kumar 	}
13544e97b631SViresh Kumar 	up_write(&policy->rwsem);
135508fd8c1cSViresh Kumar 
1356038c5b3eSGreg Kroah-Hartman 	kobject_uevent(&policy->kobj, KOBJ_ADD);
13577c45cf31SViresh Kumar 
13586eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
13596eed9404SViresh Kumar 
13607c45cf31SViresh Kumar 	/* Callback for handling stuff after policy is ready */
13617c45cf31SViresh Kumar 	if (cpufreq_driver->ready)
13627c45cf31SViresh Kumar 		cpufreq_driver->ready(policy);
13637c45cf31SViresh Kumar 
13642d06d8c4SDominik Brodowski 	pr_debug("initialization complete\n");
13651da177e4SLinus Torvalds 
13661da177e4SLinus Torvalds 	return 0;
13671da177e4SLinus Torvalds 
13681da177e4SLinus Torvalds err_out_unregister:
1369652ed95dSViresh Kumar err_get_freq:
13700d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1371474deff7SViresh Kumar 	for_each_cpu(j, policy->cpus)
13727a6aedfaSMike Travis 		per_cpu(cpufreq_cpu_data, j) = NULL;
13730d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
13741da177e4SLinus Torvalds 
13756d4e81edSTomeu Vizoso 	if (!recover_policy) {
13766d4e81edSTomeu Vizoso 		kobject_put(&policy->kobj);
13776d4e81edSTomeu Vizoso 		wait_for_completion(&policy->kobj_unregister);
13786d4e81edSTomeu Vizoso 	}
13796d4e81edSTomeu Vizoso err_init_policy_kobj:
13807106e02bSPrarit Bhargava 	up_write(&policy->rwsem);
13817106e02bSPrarit Bhargava 
1382da60ce9fSViresh Kumar 	if (cpufreq_driver->exit)
1383da60ce9fSViresh Kumar 		cpufreq_driver->exit(policy);
13842eaa3e2dSViresh Kumar err_set_policy_cpu:
138596bbbe4aSViresh Kumar 	if (recover_policy) {
138672368d12SRafael J. Wysocki 		/* Do not leave stale fallback data behind. */
138772368d12SRafael J. Wysocki 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
138842f921a6SViresh Kumar 		cpufreq_policy_put_kobj(policy);
138972368d12SRafael J. Wysocki 	}
1390e9698cc5SSrivatsa S. Bhat 	cpufreq_policy_free(policy);
139142f921a6SViresh Kumar 
13921da177e4SLinus Torvalds nomem_out:
13936eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
13946eed9404SViresh Kumar 
13951da177e4SLinus Torvalds 	return ret;
13961da177e4SLinus Torvalds }
13971da177e4SLinus Torvalds 
1398cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_prepare(struct device *dev,
139996bbbe4aSViresh Kumar 					struct subsys_interface *sif)
14001da177e4SLinus Torvalds {
1401f9ba680dSSrivatsa S. Bhat 	unsigned int cpu = dev->id, cpus;
14021bfb425bSViresh Kumar 	int ret;
14031da177e4SLinus Torvalds 	unsigned long flags;
14043a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
14051da177e4SLinus Torvalds 
1406b8eed8afSViresh Kumar 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
14071da177e4SLinus Torvalds 
14080d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
14091da177e4SLinus Torvalds 
14103a3e9e06SViresh Kumar 	policy = per_cpu(cpufreq_cpu_data, cpu);
14111da177e4SLinus Torvalds 
14128414809cSSrivatsa S. Bhat 	/* Save the policy somewhere when doing a light-weight tear-down */
141396bbbe4aSViresh Kumar 	if (cpufreq_suspended)
14143a3e9e06SViresh Kumar 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
14158414809cSSrivatsa S. Bhat 
14160d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
14171da177e4SLinus Torvalds 
14183a3e9e06SViresh Kumar 	if (!policy) {
1419b8eed8afSViresh Kumar 		pr_debug("%s: No cpu_data found\n", __func__);
14201da177e4SLinus Torvalds 		return -EINVAL;
14211da177e4SLinus Torvalds 	}
14221da177e4SLinus Torvalds 
14239c0ebcf7SViresh Kumar 	if (has_target()) {
14243de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
14253de9bdebSViresh Kumar 		if (ret) {
14263de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
14273de9bdebSViresh Kumar 			return ret;
14283de9bdebSViresh Kumar 		}
14295a01f2e8SVenkatesh Pallipadi 
1430fa69e33fSDirk Brandewie 		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
14313a3e9e06SViresh Kumar 			policy->governor->name, CPUFREQ_NAME_LEN);
1432db5f2995SViresh Kumar 	}
14331da177e4SLinus Torvalds 
1434ad7722daSviresh kumar 	down_read(&policy->rwsem);
14353a3e9e06SViresh Kumar 	cpus = cpumask_weight(policy->cpus);
1436ad7722daSviresh kumar 	up_read(&policy->rwsem);
14371da177e4SLinus Torvalds 
143861173f25SSrivatsa S. Bhat 	if (cpu != policy->cpu) {
143973bf0fc2SViresh Kumar 		sysfs_remove_link(&dev->kobj, "cpufreq");
144073bf0fc2SViresh Kumar 	} else if (cpus > 1) {
14411bfb425bSViresh Kumar 		/* Nominate new CPU */
14421bfb425bSViresh Kumar 		int new_cpu = cpumask_any_but(policy->cpus, cpu);
14431bfb425bSViresh Kumar 		struct device *cpu_dev = get_cpu_device(new_cpu);
14441bfb425bSViresh Kumar 
14451bfb425bSViresh Kumar 		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
14461bfb425bSViresh Kumar 		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
14471bfb425bSViresh Kumar 		if (ret) {
14481bfb425bSViresh Kumar 			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
14491bfb425bSViresh Kumar 					      "cpufreq"))
14501bfb425bSViresh Kumar 				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
14511bfb425bSViresh Kumar 				       __func__, cpu_dev->id);
14521bfb425bSViresh Kumar 			return ret;
14531bfb425bSViresh Kumar 		}
1454a82fab29SSrivatsa S. Bhat 
1455bda9f552SStratos Karafotis 		if (!cpufreq_suspended)
145675949c9aSViresh Kumar 			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
145775949c9aSViresh Kumar 				 __func__, new_cpu, cpu);
1458789ca243SPreeti U Murthy 	} else if (cpufreq_driver->stop_cpu) {
1459367dc4aaSDirk Brandewie 		cpufreq_driver->stop_cpu(policy);
14601da177e4SLinus Torvalds 	}
1461b8eed8afSViresh Kumar 
1462cedb70afSSrivatsa S. Bhat 	return 0;
1463cedb70afSSrivatsa S. Bhat }
1464cedb70afSSrivatsa S. Bhat 
1465cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev,
146696bbbe4aSViresh Kumar 				       struct subsys_interface *sif)
1467cedb70afSSrivatsa S. Bhat {
1468cedb70afSSrivatsa S. Bhat 	unsigned int cpu = dev->id, cpus;
1469cedb70afSSrivatsa S. Bhat 	int ret;
1470cedb70afSSrivatsa S. Bhat 	unsigned long flags;
1471cedb70afSSrivatsa S. Bhat 	struct cpufreq_policy *policy;
1472cedb70afSSrivatsa S. Bhat 
14736ffae8c0SViresh Kumar 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1474cedb70afSSrivatsa S. Bhat 	policy = per_cpu(cpufreq_cpu_data, cpu);
14756ffae8c0SViresh Kumar 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
14766ffae8c0SViresh Kumar 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1477cedb70afSSrivatsa S. Bhat 
1478cedb70afSSrivatsa S. Bhat 	if (!policy) {
1479cedb70afSSrivatsa S. Bhat 		pr_debug("%s: No cpu_data found\n", __func__);
1480cedb70afSSrivatsa S. Bhat 		return -EINVAL;
1481cedb70afSSrivatsa S. Bhat 	}
1482cedb70afSSrivatsa S. Bhat 
1483ad7722daSviresh kumar 	down_write(&policy->rwsem);
1484cedb70afSSrivatsa S. Bhat 	cpus = cpumask_weight(policy->cpus);
14859c8f1ee4SViresh Kumar 	cpumask_clear_cpu(cpu, policy->cpus);
1486ad7722daSviresh kumar 	up_write(&policy->rwsem);
1487cedb70afSSrivatsa S. Bhat 
1488b8eed8afSViresh Kumar 	/* If cpu is last user of policy, free policy */
1489b8eed8afSViresh Kumar 	if (cpus == 1) {
14909c0ebcf7SViresh Kumar 		if (has_target()) {
14913de9bdebSViresh Kumar 			ret = __cpufreq_governor(policy,
14923de9bdebSViresh Kumar 					CPUFREQ_GOV_POLICY_EXIT);
14933de9bdebSViresh Kumar 			if (ret) {
14943de9bdebSViresh Kumar 				pr_err("%s: Failed to exit governor\n",
14953de9bdebSViresh Kumar 				       __func__);
14963de9bdebSViresh Kumar 				return ret;
14973de9bdebSViresh Kumar 			}
14983de9bdebSViresh Kumar 		}
14992a998599SRafael J. Wysocki 
150096bbbe4aSViresh Kumar 		if (!cpufreq_suspended)
150142f921a6SViresh Kumar 			cpufreq_policy_put_kobj(policy);
15021da177e4SLinus Torvalds 
15038414809cSSrivatsa S. Bhat 		/*
15048414809cSSrivatsa S. Bhat 		 * Perform the ->exit() even during light-weight tear-down,
15058414809cSSrivatsa S. Bhat 		 * since this is a core component, and is essential for the
15068414809cSSrivatsa S. Bhat 		 * subsequent light-weight ->init() to succeed.
15078414809cSSrivatsa S. Bhat 		 */
15081c3d85ddSRafael J. Wysocki 		if (cpufreq_driver->exit)
15093a3e9e06SViresh Kumar 			cpufreq_driver->exit(policy);
151027ecddc2SJacob Shin 
15119515f4d6SViresh Kumar 		/* Remove policy from list of active policies */
15129515f4d6SViresh Kumar 		write_lock_irqsave(&cpufreq_driver_lock, flags);
15139515f4d6SViresh Kumar 		list_del(&policy->policy_list);
15149515f4d6SViresh Kumar 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
15159515f4d6SViresh Kumar 
151696bbbe4aSViresh Kumar 		if (!cpufreq_suspended)
15173a3e9e06SViresh Kumar 			cpufreq_policy_free(policy);
1518e5c87b76SStratos Karafotis 	} else if (has_target()) {
1519e5c87b76SStratos Karafotis 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1520e5c87b76SStratos Karafotis 		if (!ret)
1521e5c87b76SStratos Karafotis 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1522e5c87b76SStratos Karafotis 
1523e5c87b76SStratos Karafotis 		if (ret) {
1524e5c87b76SStratos Karafotis 			pr_err("%s: Failed to start governor\n", __func__);
15253de9bdebSViresh Kumar 			return ret;
15263de9bdebSViresh Kumar 		}
1527b8eed8afSViresh Kumar 	}
15281da177e4SLinus Torvalds 
15291da177e4SLinus Torvalds 	return 0;
15301da177e4SLinus Torvalds }
15311da177e4SLinus Torvalds 
1532cedb70afSSrivatsa S. Bhat /**
153327a862e9SViresh Kumar  * cpufreq_remove_dev - remove a CPU device
1534cedb70afSSrivatsa S. Bhat  *
1535cedb70afSSrivatsa S. Bhat  * Removes the cpufreq interface for a CPU device.
1536cedb70afSSrivatsa S. Bhat  */
15378a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
15385a01f2e8SVenkatesh Pallipadi {
15398a25a2fdSKay Sievers 	unsigned int cpu = dev->id;
154027a862e9SViresh Kumar 	int ret;
1541ec28297aSVenki Pallipadi 
1542ec28297aSVenki Pallipadi 	if (cpu_is_offline(cpu))
1543ec28297aSVenki Pallipadi 		return 0;
1544ec28297aSVenki Pallipadi 
154596bbbe4aSViresh Kumar 	ret = __cpufreq_remove_dev_prepare(dev, sif);
154627a862e9SViresh Kumar 
154727a862e9SViresh Kumar 	if (!ret)
154896bbbe4aSViresh Kumar 		ret = __cpufreq_remove_dev_finish(dev, sif);
154927a862e9SViresh Kumar 
155027a862e9SViresh Kumar 	return ret;
15515a01f2e8SVenkatesh Pallipadi }
15525a01f2e8SVenkatesh Pallipadi 
155365f27f38SDavid Howells static void handle_update(struct work_struct *work)
15541da177e4SLinus Torvalds {
155565f27f38SDavid Howells 	struct cpufreq_policy *policy =
155665f27f38SDavid Howells 		container_of(work, struct cpufreq_policy, update);
155765f27f38SDavid Howells 	unsigned int cpu = policy->cpu;
15582d06d8c4SDominik Brodowski 	pr_debug("handle_update for cpu %u called\n", cpu);
15591da177e4SLinus Torvalds 	cpufreq_update_policy(cpu);
15601da177e4SLinus Torvalds }
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds /**
1563bb176f7dSViresh Kumar  *	cpufreq_out_of_sync - If the actual and saved CPU frequency differ, we're
1564bb176f7dSViresh Kumar  *	in deep trouble.
1565a1e1dc41SViresh Kumar  *	@policy: policy managing CPUs
15661da177e4SLinus Torvalds  *	@new_freq: CPU frequency the CPU actually runs at
15671da177e4SLinus Torvalds  *
156829464f28SDave Jones  *	We adjust to current frequency first, and need to clean up later.
156929464f28SDave Jones  *	So either call cpufreq_update_policy() or schedule handle_update().
15701da177e4SLinus Torvalds  */
1571a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1572e08f5f5bSGautham R Shenoy 				unsigned int new_freq)
15731da177e4SLinus Torvalds {
15741da177e4SLinus Torvalds 	struct cpufreq_freqs freqs;
1575b43a7ffbSViresh Kumar 
1576e837f9b5SJoe Perches 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1577a1e1dc41SViresh Kumar 		 policy->cur, new_freq);
15781da177e4SLinus Torvalds 
1579a1e1dc41SViresh Kumar 	freqs.old = policy->cur;
15801da177e4SLinus Torvalds 	freqs.new = new_freq;
1581b43a7ffbSViresh Kumar 
15828fec051eSViresh Kumar 	cpufreq_freq_transition_begin(policy, &freqs);
15838fec051eSViresh Kumar 	cpufreq_freq_transition_end(policy, &freqs, 0);
15841da177e4SLinus Torvalds }
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds /**
15874ab70df4SDhaval Giani  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
158895235ca2SVenkatesh Pallipadi  * @cpu: CPU number
158995235ca2SVenkatesh Pallipadi  *
159095235ca2SVenkatesh Pallipadi  * This is the last known freq, without actually getting it from the driver.
159195235ca2SVenkatesh Pallipadi  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
159295235ca2SVenkatesh Pallipadi  */
159395235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu)
159495235ca2SVenkatesh Pallipadi {
15959e21ba8bSDirk Brandewie 	struct cpufreq_policy *policy;
1596e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
159795235ca2SVenkatesh Pallipadi 
15981c3d85ddSRafael J. Wysocki 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
15991c3d85ddSRafael J. Wysocki 		return cpufreq_driver->get(cpu);
16009e21ba8bSDirk Brandewie 
16019e21ba8bSDirk Brandewie 	policy = cpufreq_cpu_get(cpu);
160295235ca2SVenkatesh Pallipadi 	if (policy) {
1603e08f5f5bSGautham R Shenoy 		ret_freq = policy->cur;
160495235ca2SVenkatesh Pallipadi 		cpufreq_cpu_put(policy);
160595235ca2SVenkatesh Pallipadi 	}
160695235ca2SVenkatesh Pallipadi 
16074d34a67dSDave Jones 	return ret_freq;
160895235ca2SVenkatesh Pallipadi }
160995235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get);
161095235ca2SVenkatesh Pallipadi 
16113d737108SJesse Barnes /**
16123d737108SJesse Barnes  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
16133d737108SJesse Barnes  * @cpu: CPU number
16143d737108SJesse Barnes  *
16153d737108SJesse Barnes  * Just return the max possible frequency for a given CPU.
16163d737108SJesse Barnes  */
16173d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu)
16183d737108SJesse Barnes {
16193d737108SJesse Barnes 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16203d737108SJesse Barnes 	unsigned int ret_freq = 0;
16213d737108SJesse Barnes 
16223d737108SJesse Barnes 	if (policy) {
16233d737108SJesse Barnes 		ret_freq = policy->max;
16243d737108SJesse Barnes 		cpufreq_cpu_put(policy);
16253d737108SJesse Barnes 	}
16263d737108SJesse Barnes 
16273d737108SJesse Barnes 	return ret_freq;
16283d737108SJesse Barnes }
16293d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max);
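
/*
 * Editor's illustrative sketch, not part of the original file: one possible
 * caller of the two helpers above. The function name show_cpu_khz() is a
 * hypothetical example; it only demonstrates that both helpers return 0 when
 * no policy exists for the CPU.
 */
static void show_cpu_khz(unsigned int cpu)
{
	unsigned int cur = cpufreq_quick_get(cpu);	/* last known freq */
	unsigned int max = cpufreq_quick_get_max(cpu);	/* policy->max */

	if (cur)
		pr_info("cpu%u: %u kHz (max %u kHz)\n", cpu, cur, max);
	else
		pr_info("cpu%u: no cpufreq policy\n", cpu);
}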
16303d737108SJesse Barnes 
1631d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
16321da177e4SLinus Torvalds {
1633e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
16341da177e4SLinus Torvalds 
16351c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver->get)
16364d34a67dSDave Jones 		return ret_freq;
16371da177e4SLinus Torvalds 
1638d92d50a4SViresh Kumar 	ret_freq = cpufreq_driver->get(policy->cpu);
16391da177e4SLinus Torvalds 
1640e08f5f5bSGautham R Shenoy 	if (ret_freq && policy->cur &&
16411c3d85ddSRafael J. Wysocki 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1642e08f5f5bSGautham R Shenoy 		/* verify that no discrepancy between the actual and
1643e08f5f5bSGautham R Shenoy 		 * the saved value exists */
1644e08f5f5bSGautham R Shenoy 		if (unlikely(ret_freq != policy->cur)) {
1645a1e1dc41SViresh Kumar 			cpufreq_out_of_sync(policy, ret_freq);
16461da177e4SLinus Torvalds 			schedule_work(&policy->update);
16471da177e4SLinus Torvalds 		}
16481da177e4SLinus Torvalds 	}
16491da177e4SLinus Torvalds 
16504d34a67dSDave Jones 	return ret_freq;
16515a01f2e8SVenkatesh Pallipadi }
16521da177e4SLinus Torvalds 
16535a01f2e8SVenkatesh Pallipadi /**
16545a01f2e8SVenkatesh Pallipadi  * cpufreq_get - get the current CPU frequency (in kHz)
16555a01f2e8SVenkatesh Pallipadi  * @cpu: CPU number
16565a01f2e8SVenkatesh Pallipadi  *
16575a01f2e8SVenkatesh Pallipadi  * Get the current (static) frequency of the given CPU
16585a01f2e8SVenkatesh Pallipadi  */
16595a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu)
16605a01f2e8SVenkatesh Pallipadi {
1661999976e0SAaron Plattner 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16625a01f2e8SVenkatesh Pallipadi 	unsigned int ret_freq = 0;
16635a01f2e8SVenkatesh Pallipadi 
1664999976e0SAaron Plattner 	if (policy) {
1665ad7722daSviresh kumar 		down_read(&policy->rwsem);
1666d92d50a4SViresh Kumar 		ret_freq = __cpufreq_get(policy);
1667ad7722daSviresh kumar 		up_read(&policy->rwsem);
1668999976e0SAaron Plattner 
1669999976e0SAaron Plattner 		cpufreq_cpu_put(policy);
1670999976e0SAaron Plattner 	}
16716eed9404SViresh Kumar 
16724d34a67dSDave Jones 	return ret_freq;
16731da177e4SLinus Torvalds }
16741da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get);
16751da177e4SLinus Torvalds 
16768a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = {
16778a25a2fdSKay Sievers 	.name		= "cpufreq",
16788a25a2fdSKay Sievers 	.subsys		= &cpu_subsys,
16798a25a2fdSKay Sievers 	.add_dev	= cpufreq_add_dev,
16808a25a2fdSKay Sievers 	.remove_dev	= cpufreq_remove_dev,
1681e00e56dfSRafael J. Wysocki };
1682e00e56dfSRafael J. Wysocki 
1683e28867eaSViresh Kumar /*
1684e28867eaSViresh Kumar  * In case the platform wants some specific frequency to be configured
1685e28867eaSViresh Kumar  * during suspend.
168642d4dc3fSBenjamin Herrenschmidt  */
1687e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy)
168842d4dc3fSBenjamin Herrenschmidt {
1689e28867eaSViresh Kumar 	int ret;
16904bc5d341SDave Jones 
1691e28867eaSViresh Kumar 	if (!policy->suspend_freq) {
1692e28867eaSViresh Kumar 		pr_err("%s: suspend_freq can't be zero\n", __func__);
1693e28867eaSViresh Kumar 		return -EINVAL;
169442d4dc3fSBenjamin Herrenschmidt 	}
169542d4dc3fSBenjamin Herrenschmidt 
1696e28867eaSViresh Kumar 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1697e28867eaSViresh Kumar 			policy->suspend_freq);
1698e28867eaSViresh Kumar 
1699e28867eaSViresh Kumar 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1700e28867eaSViresh Kumar 			CPUFREQ_RELATION_H);
1701e28867eaSViresh Kumar 	if (ret)
1702e28867eaSViresh Kumar 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1703e28867eaSViresh Kumar 				__func__, policy->suspend_freq, ret);
1704e28867eaSViresh Kumar 
1705c9060494SDave Jones 	return ret;
170642d4dc3fSBenjamin Herrenschmidt }
1707e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend);
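
/*
 * Editor's illustrative sketch, not part of the original file: how a platform
 * driver might hook up cpufreq_generic_suspend(). All example_* names and the
 * chosen suspend frequency are assumptions for illustration only.
 */
#define EXAMPLE_SUSPEND_FREQ_KHZ	200000

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... normal setup of the frequency table, latency, etc. ... */

	/* Frequency the core should program before the system suspends */
	policy->suspend_freq = EXAMPLE_SUSPEND_FREQ_KHZ;
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.init		= example_cpufreq_init,
	.suspend	= cpufreq_generic_suspend,
	/* .verify, .target_index, .get, ... omitted in this sketch */
};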
170842d4dc3fSBenjamin Herrenschmidt 
170942d4dc3fSBenjamin Herrenschmidt /**
17102f0aea93SViresh Kumar  * cpufreq_suspend() - Suspend CPUFreq governors
17111da177e4SLinus Torvalds  *
17122f0aea93SViresh Kumar  * Called during system-wide suspend/hibernate cycles to suspend governors,
17132f0aea93SViresh Kumar  * as some platforms can't change frequency after this point in the suspend
17142f0aea93SViresh Kumar  * cycle, because some of the devices (like: i2c, regulators, etc.) they use
17152f0aea93SViresh Kumar  * for changing frequency are suspended quickly after this point.
17161da177e4SLinus Torvalds  */
17172f0aea93SViresh Kumar void cpufreq_suspend(void)
17181da177e4SLinus Torvalds {
17193a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
17201da177e4SLinus Torvalds 
17212f0aea93SViresh Kumar 	if (!cpufreq_driver)
1722e00e56dfSRafael J. Wysocki 		return;
17231da177e4SLinus Torvalds 
17242f0aea93SViresh Kumar 	if (!has_target())
1725b1b12babSViresh Kumar 		goto suspend;
17261da177e4SLinus Torvalds 
17272f0aea93SViresh Kumar 	pr_debug("%s: Suspending Governors\n", __func__);
17282f0aea93SViresh Kumar 
1729*f963735aSViresh Kumar 	for_each_active_policy(policy) {
17302f0aea93SViresh Kumar 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
17312f0aea93SViresh Kumar 			pr_err("%s: Failed to stop governor for policy: %p\n",
17322f0aea93SViresh Kumar 				__func__, policy);
17332f0aea93SViresh Kumar 		else if (cpufreq_driver->suspend
17342f0aea93SViresh Kumar 		    && cpufreq_driver->suspend(policy))
17352f0aea93SViresh Kumar 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
17362f0aea93SViresh Kumar 				policy);
17371da177e4SLinus Torvalds 	}
1738b1b12babSViresh Kumar 
1739b1b12babSViresh Kumar suspend:
1740b1b12babSViresh Kumar 	cpufreq_suspended = true;
17411da177e4SLinus Torvalds }
17421da177e4SLinus Torvalds 
17431da177e4SLinus Torvalds /**
17442f0aea93SViresh Kumar  * cpufreq_resume() - Resume CPUFreq governors
17451da177e4SLinus Torvalds  *
17462f0aea93SViresh Kumar  * Called during system-wide suspend/hibernate cycles to resume governors that
17472f0aea93SViresh Kumar  * are suspended with cpufreq_suspend().
17481da177e4SLinus Torvalds  */
17492f0aea93SViresh Kumar void cpufreq_resume(void)
17501da177e4SLinus Torvalds {
17511da177e4SLinus Torvalds 	struct cpufreq_policy *policy;
17521da177e4SLinus Torvalds 
17532f0aea93SViresh Kumar 	if (!cpufreq_driver)
17541da177e4SLinus Torvalds 		return;
17551da177e4SLinus Torvalds 
17568e30444eSLan Tianyu 	cpufreq_suspended = false;
17578e30444eSLan Tianyu 
17582f0aea93SViresh Kumar 	if (!has_target())
17592f0aea93SViresh Kumar 		return;
17601da177e4SLinus Torvalds 
17612f0aea93SViresh Kumar 	pr_debug("%s: Resuming Governors\n", __func__);
17622f0aea93SViresh Kumar 
1763*f963735aSViresh Kumar 	for_each_active_policy(policy) {
17640c5aa405SViresh Kumar 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
17650c5aa405SViresh Kumar 			pr_err("%s: Failed to resume driver: %p\n", __func__,
17660c5aa405SViresh Kumar 				policy);
17670c5aa405SViresh Kumar 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
17682f0aea93SViresh Kumar 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
17692f0aea93SViresh Kumar 			pr_err("%s: Failed to start governor for policy: %p\n",
17702f0aea93SViresh Kumar 				__func__, policy);
1771c75de0acSViresh Kumar 	}
17722f0aea93SViresh Kumar 
17732f0aea93SViresh Kumar 	/*
1774c75de0acSViresh Kumar 	 * Schedule a call to cpufreq_update_policy() for the first online CPU,
1775c75de0acSViresh Kumar 	 * as that one wouldn't be hotplugged out on suspend. It will verify
1776c75de0acSViresh Kumar 	 * that the current freq is in sync with what we believe it to be.
17772f0aea93SViresh Kumar 	 */
1778c75de0acSViresh Kumar 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1779c75de0acSViresh Kumar 	if (WARN_ON(!policy))
1780c75de0acSViresh Kumar 		return;
1781c75de0acSViresh Kumar 
17823a3e9e06SViresh Kumar 	schedule_work(&policy->update);
17831da177e4SLinus Torvalds }
17841da177e4SLinus Torvalds 
17859d95046eSBorislav Petkov /**
17869d95046eSBorislav Petkov  *	cpufreq_get_current_driver - return current driver's name
17879d95046eSBorislav Petkov  *
17889d95046eSBorislav Petkov  *	Return the name string of the currently loaded cpufreq driver
17899d95046eSBorislav Petkov  *	or NULL, if none.
17909d95046eSBorislav Petkov  */
17919d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void)
17929d95046eSBorislav Petkov {
17931c3d85ddSRafael J. Wysocki 	if (cpufreq_driver)
17941c3d85ddSRafael J. Wysocki 		return cpufreq_driver->name;
17951c3d85ddSRafael J. Wysocki 
17961c3d85ddSRafael J. Wysocki 	return NULL;
17979d95046eSBorislav Petkov }
17989d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
17991da177e4SLinus Torvalds 
180051315cdfSThomas Petazzoni /**
180151315cdfSThomas Petazzoni  *	cpufreq_get_driver_data - return current driver data
180251315cdfSThomas Petazzoni  *
180351315cdfSThomas Petazzoni  *	Return the private data of the currently loaded cpufreq
180451315cdfSThomas Petazzoni  *	driver, or NULL if no cpufreq driver is loaded.
180551315cdfSThomas Petazzoni  */
180651315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void)
180751315cdfSThomas Petazzoni {
180851315cdfSThomas Petazzoni 	if (cpufreq_driver)
180951315cdfSThomas Petazzoni 		return cpufreq_driver->driver_data;
181051315cdfSThomas Petazzoni 
181151315cdfSThomas Petazzoni 	return NULL;
181251315cdfSThomas Petazzoni }
181351315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
181451315cdfSThomas Petazzoni 
18151da177e4SLinus Torvalds /*********************************************************************
18161da177e4SLinus Torvalds  *                     NOTIFIER LISTS INTERFACE                      *
18171da177e4SLinus Torvalds  *********************************************************************/
18181da177e4SLinus Torvalds 
18191da177e4SLinus Torvalds /**
18201da177e4SLinus Torvalds  *	cpufreq_register_notifier - register a driver with cpufreq
18211da177e4SLinus Torvalds  *	@nb: notifier function to register
18221da177e4SLinus Torvalds  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18231da177e4SLinus Torvalds  *
18241da177e4SLinus Torvalds  *	Add a driver to one of two lists: either a list of drivers that
18251da177e4SLinus Torvalds  *      are notified about clock rate changes (once before and once after
18261da177e4SLinus Torvalds  *      the transition), or a list of drivers that are notified about
18271da177e4SLinus Torvalds  *      changes in cpufreq policy.
18281da177e4SLinus Torvalds  *
18291da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1830e041c683SAlan Stern  *	blocking_notifier_chain_register.
18311da177e4SLinus Torvalds  */
18321da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
18331da177e4SLinus Torvalds {
18341da177e4SLinus Torvalds 	int ret;
18351da177e4SLinus Torvalds 
1836d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1837d5aaffa9SDirk Brandewie 		return -EINVAL;
1838d5aaffa9SDirk Brandewie 
183974212ca4SCesar Eduardo Barros 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
184074212ca4SCesar Eduardo Barros 
18411da177e4SLinus Torvalds 	switch (list) {
18421da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1843b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_register(
1844e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18451da177e4SLinus Torvalds 		break;
18461da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1847e041c683SAlan Stern 		ret = blocking_notifier_chain_register(
1848e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18491da177e4SLinus Torvalds 		break;
18501da177e4SLinus Torvalds 	default:
18511da177e4SLinus Torvalds 		ret = -EINVAL;
18521da177e4SLinus Torvalds 	}
18531da177e4SLinus Torvalds 
18541da177e4SLinus Torvalds 	return ret;
18551da177e4SLinus Torvalds }
18561da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier);
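
/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * client of cpufreq_register_notifier() for transition notifications. The
 * example_* names are hypothetical.
 */
static int example_transition_notifier(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu %u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_notifier,
};

/* Typically registered/unregistered from module init/exit:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 *	cpufreq_unregister_notifier(&example_transition_nb,
 *				    CPUFREQ_TRANSITION_NOTIFIER);
 */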
18571da177e4SLinus Torvalds 
18581da177e4SLinus Torvalds /**
18591da177e4SLinus Torvalds  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
18601da177e4SLinus Torvalds  *	@nb: notifier block to be unregistered
18611da177e4SLinus Torvalds  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18621da177e4SLinus Torvalds  *
18631da177e4SLinus Torvalds  *	Remove a driver from the CPU frequency notifier list.
18641da177e4SLinus Torvalds  *
18651da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1866e041c683SAlan Stern  *	blocking_notifier_chain_unregister.
18671da177e4SLinus Torvalds  */
18681da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
18691da177e4SLinus Torvalds {
18701da177e4SLinus Torvalds 	int ret;
18711da177e4SLinus Torvalds 
1872d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1873d5aaffa9SDirk Brandewie 		return -EINVAL;
1874d5aaffa9SDirk Brandewie 
18751da177e4SLinus Torvalds 	switch (list) {
18761da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1877b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_unregister(
1878e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18791da177e4SLinus Torvalds 		break;
18801da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1881e041c683SAlan Stern 		ret = blocking_notifier_chain_unregister(
1882e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18831da177e4SLinus Torvalds 		break;
18841da177e4SLinus Torvalds 	default:
18851da177e4SLinus Torvalds 		ret = -EINVAL;
18861da177e4SLinus Torvalds 	}
18871da177e4SLinus Torvalds 
18881da177e4SLinus Torvalds 	return ret;
18891da177e4SLinus Torvalds }
18901da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier);
18911da177e4SLinus Torvalds 
18921da177e4SLinus Torvalds 
18931da177e4SLinus Torvalds /*********************************************************************
18941da177e4SLinus Torvalds  *                              GOVERNORS                            *
18951da177e4SLinus Torvalds  *********************************************************************/
18961da177e4SLinus Torvalds 
18971c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */
18981c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy,
18991c03a2d0SViresh Kumar 				 struct cpufreq_freqs *freqs, int index)
19001c03a2d0SViresh Kumar {
19011c03a2d0SViresh Kumar 	int ret;
19021c03a2d0SViresh Kumar 
19031c03a2d0SViresh Kumar 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
19041c03a2d0SViresh Kumar 
19051c03a2d0SViresh Kumar 	/* We don't need to switch to intermediate freq */
19061c03a2d0SViresh Kumar 	if (!freqs->new)
19071c03a2d0SViresh Kumar 		return 0;
19081c03a2d0SViresh Kumar 
19091c03a2d0SViresh Kumar 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
19101c03a2d0SViresh Kumar 		 __func__, policy->cpu, freqs->old, freqs->new);
19111c03a2d0SViresh Kumar 
19121c03a2d0SViresh Kumar 	cpufreq_freq_transition_begin(policy, freqs);
19131c03a2d0SViresh Kumar 	ret = cpufreq_driver->target_intermediate(policy, index);
19141c03a2d0SViresh Kumar 	cpufreq_freq_transition_end(policy, freqs, ret);
19151c03a2d0SViresh Kumar 
19161c03a2d0SViresh Kumar 	if (ret)
19171c03a2d0SViresh Kumar 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
19181c03a2d0SViresh Kumar 		       __func__, ret);
19191c03a2d0SViresh Kumar 
19201c03a2d0SViresh Kumar 	return ret;
19211c03a2d0SViresh Kumar }
19221c03a2d0SViresh Kumar 
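/*
 * Editor's illustrative sketch, not part of the original file: the driver side
 * of the intermediate-frequency handshake driven by __target_intermediate()
 * above, i.e. a driver's ->get_intermediate() and ->target_intermediate()
 * callbacks. All example_* names and the fixed bypass frequency are
 * assumptions for illustration only.
 */
#define EXAMPLE_BYPASS_FREQ_KHZ	24000	/* e.g. run from the crystal while the PLL relocks */

static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
					     unsigned int index)
{
	/* Returning 0 here tells the core no intermediate switch is needed */
	return EXAMPLE_BYPASS_FREQ_KHZ;
}

static int example_target_intermediate(struct cpufreq_policy *policy,
				       unsigned int index)
{
	/* Re-parent the CPU clock to the bypass source; the final frequency
	 * is then programmed in ->target_index(). */
	return 0;
}
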
19238d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy,
19248d65775dSViresh Kumar 			  struct cpufreq_frequency_table *freq_table, int index)
19258d65775dSViresh Kumar {
19261c03a2d0SViresh Kumar 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
19271c03a2d0SViresh Kumar 	unsigned int intermediate_freq = 0;
19288d65775dSViresh Kumar 	int retval = -EINVAL;
19298d65775dSViresh Kumar 	bool notify;
19308d65775dSViresh Kumar 
19318d65775dSViresh Kumar 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
19328d65775dSViresh Kumar 	if (notify) {
19331c03a2d0SViresh Kumar 		/* Handle switching to intermediate frequency */
19341c03a2d0SViresh Kumar 		if (cpufreq_driver->get_intermediate) {
19351c03a2d0SViresh Kumar 			retval = __target_intermediate(policy, &freqs, index);
19361c03a2d0SViresh Kumar 			if (retval)
19371c03a2d0SViresh Kumar 				return retval;
19388d65775dSViresh Kumar 
19391c03a2d0SViresh Kumar 			intermediate_freq = freqs.new;
19401c03a2d0SViresh Kumar 			/* Set old freq to intermediate */
19411c03a2d0SViresh Kumar 			if (intermediate_freq)
19421c03a2d0SViresh Kumar 				freqs.old = freqs.new;
19431c03a2d0SViresh Kumar 		}
19441c03a2d0SViresh Kumar 
19451c03a2d0SViresh Kumar 		freqs.new = freq_table[index].frequency;
19468d65775dSViresh Kumar 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
19478d65775dSViresh Kumar 			 __func__, policy->cpu, freqs.old, freqs.new);
19488d65775dSViresh Kumar 
19498d65775dSViresh Kumar 		cpufreq_freq_transition_begin(policy, &freqs);
19508d65775dSViresh Kumar 	}
19518d65775dSViresh Kumar 
19528d65775dSViresh Kumar 	retval = cpufreq_driver->target_index(policy, index);
19538d65775dSViresh Kumar 	if (retval)
19548d65775dSViresh Kumar 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
19558d65775dSViresh Kumar 		       retval);
19568d65775dSViresh Kumar 
19571c03a2d0SViresh Kumar 	if (notify) {
19588d65775dSViresh Kumar 		cpufreq_freq_transition_end(policy, &freqs, retval);
19598d65775dSViresh Kumar 
19601c03a2d0SViresh Kumar 		/*
19611c03a2d0SViresh Kumar 		 * Failed after setting to intermediate freq? Driver should have
19621c03a2d0SViresh Kumar 		 * reverted back to initial frequency and so should we. Check
19631c03a2d0SViresh Kumar 		 * here for intermediate_freq instead of get_intermediate, in
19641c03a2d0SViresh Kumar 		 * case we haven't switched to intermediate freq at all.
19651c03a2d0SViresh Kumar 		 */
19661c03a2d0SViresh Kumar 		if (unlikely(retval && intermediate_freq)) {
19671c03a2d0SViresh Kumar 			freqs.old = intermediate_freq;
19681c03a2d0SViresh Kumar 			freqs.new = policy->restore_freq;
19691c03a2d0SViresh Kumar 			cpufreq_freq_transition_begin(policy, &freqs);
19701c03a2d0SViresh Kumar 			cpufreq_freq_transition_end(policy, &freqs, 0);
19711c03a2d0SViresh Kumar 		}
19721c03a2d0SViresh Kumar 	}
19731c03a2d0SViresh Kumar 
19748d65775dSViresh Kumar 	return retval;
19758d65775dSViresh Kumar }
19768d65775dSViresh Kumar 
19771da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy,
19781da177e4SLinus Torvalds 			    unsigned int target_freq,
19791da177e4SLinus Torvalds 			    unsigned int relation)
19801da177e4SLinus Torvalds {
19817249924eSViresh Kumar 	unsigned int old_target_freq = target_freq;
19828d65775dSViresh Kumar 	int retval = -EINVAL;
1983c32b6b8eSAshok Raj 
1984a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
1985a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
1986a7b422cdSKonrad Rzeszutek Wilk 
19877249924eSViresh Kumar 	/* Make sure that target_freq is within supported range */
19887249924eSViresh Kumar 	if (target_freq > policy->max)
19897249924eSViresh Kumar 		target_freq = policy->max;
19907249924eSViresh Kumar 	if (target_freq < policy->min)
19917249924eSViresh Kumar 		target_freq = policy->min;
19927249924eSViresh Kumar 
19937249924eSViresh Kumar 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
19947249924eSViresh Kumar 		 policy->cpu, target_freq, relation, old_target_freq);
19955a1c0228SViresh Kumar 
19969c0ebcf7SViresh Kumar 	/*
19979c0ebcf7SViresh Kumar 	 * This might look like a redundant call as we are checking it again
19989c0ebcf7SViresh Kumar 	 * after finding index. But it is left intentionally for cases where
19999c0ebcf7SViresh Kumar 	 * the exact same freq is requested again, so we can save a few
20009c0ebcf7SViresh Kumar 	 * function calls.
20019c0ebcf7SViresh Kumar 	 */
20025a1c0228SViresh Kumar 	if (target_freq == policy->cur)
20035a1c0228SViresh Kumar 		return 0;
20045a1c0228SViresh Kumar 
20051c03a2d0SViresh Kumar 	/* Save last value to restore later on errors */
20061c03a2d0SViresh Kumar 	policy->restore_freq = policy->cur;
20071c03a2d0SViresh Kumar 
20081c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->target)
20091c3d85ddSRafael J. Wysocki 		retval = cpufreq_driver->target(policy, target_freq, relation);
20109c0ebcf7SViresh Kumar 	else if (cpufreq_driver->target_index) {
20119c0ebcf7SViresh Kumar 		struct cpufreq_frequency_table *freq_table;
20129c0ebcf7SViresh Kumar 		int index;
201390d45d17SAshok Raj 
20149c0ebcf7SViresh Kumar 		freq_table = cpufreq_frequency_get_table(policy->cpu);
20159c0ebcf7SViresh Kumar 		if (unlikely(!freq_table)) {
20169c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find freq_table\n", __func__);
20179c0ebcf7SViresh Kumar 			goto out;
20189c0ebcf7SViresh Kumar 		}
20199c0ebcf7SViresh Kumar 
20209c0ebcf7SViresh Kumar 		retval = cpufreq_frequency_table_target(policy, freq_table,
20219c0ebcf7SViresh Kumar 				target_freq, relation, &index);
20229c0ebcf7SViresh Kumar 		if (unlikely(retval)) {
20239c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find matching freq\n", __func__);
20249c0ebcf7SViresh Kumar 			goto out;
20259c0ebcf7SViresh Kumar 		}
20269c0ebcf7SViresh Kumar 
2027d4019f0aSViresh Kumar 		if (freq_table[index].frequency == policy->cur) {
20289c0ebcf7SViresh Kumar 			retval = 0;
2029d4019f0aSViresh Kumar 			goto out;
2030d4019f0aSViresh Kumar 		}
2031d4019f0aSViresh Kumar 
20328d65775dSViresh Kumar 		retval = __target_index(policy, freq_table, index);
20339c0ebcf7SViresh Kumar 	}
20349c0ebcf7SViresh Kumar 
20359c0ebcf7SViresh Kumar out:
20361da177e4SLinus Torvalds 	return retval;
20371da177e4SLinus Torvalds }
20381da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
20391da177e4SLinus Torvalds 
20401da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy,
20411da177e4SLinus Torvalds 			  unsigned int target_freq,
20421da177e4SLinus Torvalds 			  unsigned int relation)
20431da177e4SLinus Torvalds {
2044f1829e4aSJulia Lawall 	int ret = -EINVAL;
20451da177e4SLinus Torvalds 
2046ad7722daSviresh kumar 	down_write(&policy->rwsem);
20471da177e4SLinus Torvalds 
20481da177e4SLinus Torvalds 	ret = __cpufreq_driver_target(policy, target_freq, relation);
20491da177e4SLinus Torvalds 
2050ad7722daSviresh kumar 	up_write(&policy->rwsem);
20511da177e4SLinus Torvalds 
20521da177e4SLinus Torvalds 	return ret;
20531da177e4SLinus Torvalds }
20541da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target);
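
/*
 * Editor's illustrative note, not part of the original file: the relation
 * argument picks the rounding direction when the exact target frequency is
 * not in the table, e.g. (assuming a valid policy pointer):
 *
 *	cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
 *		selects the lowest table frequency at or above 800 MHz
 *	cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_H);
 *		selects the highest table frequency at or below 800 MHz
 */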
20551da177e4SLinus Torvalds 
2056e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy,
2057e08f5f5bSGautham R Shenoy 					unsigned int event)
20581da177e4SLinus Torvalds {
2059cc993cabSDave Jones 	int ret;
20606afde10cSThomas Renninger 
20616afde10cSThomas Renninger 	/* Must only be defined when the default governor is known to have
20626afde10cSThomas Renninger 	 * latency restrictions, like e.g. conservative or ondemand.
20636afde10cSThomas Renninger 	 * That this is the case is already ensured in Kconfig.
20646afde10cSThomas Renninger 	 */
20656afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
20666afde10cSThomas Renninger 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
20676afde10cSThomas Renninger #else
20686afde10cSThomas Renninger 	struct cpufreq_governor *gov = NULL;
20696afde10cSThomas Renninger #endif
20701c256245SThomas Renninger 
20712f0aea93SViresh Kumar 	/* Don't start any governor operations if we are entering suspend */
20722f0aea93SViresh Kumar 	if (cpufreq_suspended)
20732f0aea93SViresh Kumar 		return 0;
2074cb57720bSEthan Zhao 	/*
2075cb57720bSEthan Zhao 	 * The governor might not be initialized here if an ACPI _PPC change
2076cb57720bSEthan Zhao 	 * notification happened, so check it.
2077cb57720bSEthan Zhao 	 */
2078cb57720bSEthan Zhao 	if (!policy->governor)
2079cb57720bSEthan Zhao 		return -EINVAL;
20802f0aea93SViresh Kumar 
20811c256245SThomas Renninger 	if (policy->governor->max_transition_latency &&
20821c256245SThomas Renninger 	    policy->cpuinfo.transition_latency >
20831c256245SThomas Renninger 	    policy->governor->max_transition_latency) {
20846afde10cSThomas Renninger 		if (!gov)
20856afde10cSThomas Renninger 			return -EINVAL;
20866afde10cSThomas Renninger 		else {
2087e837f9b5SJoe Perches 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2088e837f9b5SJoe Perches 				policy->governor->name, gov->name);
20891c256245SThomas Renninger 			policy->governor = gov;
20901c256245SThomas Renninger 		}
20916afde10cSThomas Renninger 	}
20921da177e4SLinus Torvalds 
2093fe492f3fSViresh Kumar 	if (event == CPUFREQ_GOV_POLICY_INIT)
20941da177e4SLinus Torvalds 		if (!try_module_get(policy->governor->owner))
20951da177e4SLinus Torvalds 			return -EINVAL;
20961da177e4SLinus Torvalds 
20972d06d8c4SDominik Brodowski 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2098e08f5f5bSGautham R Shenoy 		 policy->cpu, event);
209995731ebbSXiaoguang Chen 
210095731ebbSXiaoguang Chen 	mutex_lock(&cpufreq_governor_lock);
210156d07db2SSrivatsa S. Bhat 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2102f73d3933SViresh Kumar 	    || (!policy->governor_enabled
2103f73d3933SViresh Kumar 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
210495731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
210595731ebbSXiaoguang Chen 		return -EBUSY;
210695731ebbSXiaoguang Chen 	}
210795731ebbSXiaoguang Chen 
210895731ebbSXiaoguang Chen 	if (event == CPUFREQ_GOV_STOP)
210995731ebbSXiaoguang Chen 		policy->governor_enabled = false;
211095731ebbSXiaoguang Chen 	else if (event == CPUFREQ_GOV_START)
211195731ebbSXiaoguang Chen 		policy->governor_enabled = true;
211295731ebbSXiaoguang Chen 
211395731ebbSXiaoguang Chen 	mutex_unlock(&cpufreq_governor_lock);
211495731ebbSXiaoguang Chen 
21151da177e4SLinus Torvalds 	ret = policy->governor->governor(policy, event);
21161da177e4SLinus Torvalds 
21174d5dcc42SViresh Kumar 	if (!ret) {
21184d5dcc42SViresh Kumar 		if (event == CPUFREQ_GOV_POLICY_INIT)
21198e53695fSViresh Kumar 			policy->governor->initialized++;
21204d5dcc42SViresh Kumar 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
21218e53695fSViresh Kumar 			policy->governor->initialized--;
212295731ebbSXiaoguang Chen 	} else {
212395731ebbSXiaoguang Chen 		/* Restore original values */
212495731ebbSXiaoguang Chen 		mutex_lock(&cpufreq_governor_lock);
212595731ebbSXiaoguang Chen 		if (event == CPUFREQ_GOV_STOP)
212695731ebbSXiaoguang Chen 			policy->governor_enabled = true;
212795731ebbSXiaoguang Chen 		else if (event == CPUFREQ_GOV_START)
212895731ebbSXiaoguang Chen 			policy->governor_enabled = false;
212995731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
21304d5dcc42SViresh Kumar 	}
2131b394058fSViresh Kumar 
2132fe492f3fSViresh Kumar 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2133fe492f3fSViresh Kumar 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
21341da177e4SLinus Torvalds 		module_put(policy->governor->owner);
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds 	return ret;
21371da177e4SLinus Torvalds }
21381da177e4SLinus Torvalds 
21391da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor)
21401da177e4SLinus Torvalds {
21413bcb09a3SJeremy Fitzhardinge 	int err;
21421da177e4SLinus Torvalds 
21431da177e4SLinus Torvalds 	if (!governor)
21441da177e4SLinus Torvalds 		return -EINVAL;
21451da177e4SLinus Torvalds 
2146a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2147a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2148a7b422cdSKonrad Rzeszutek Wilk 
21493fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21501da177e4SLinus Torvalds 
2151b394058fSViresh Kumar 	governor->initialized = 0;
21523bcb09a3SJeremy Fitzhardinge 	err = -EBUSY;
215342f91fa1SViresh Kumar 	if (!find_governor(governor->name)) {
21543bcb09a3SJeremy Fitzhardinge 		err = 0;
21551da177e4SLinus Torvalds 		list_add(&governor->governor_list, &cpufreq_governor_list);
21563bcb09a3SJeremy Fitzhardinge 	}
21571da177e4SLinus Torvalds 
21583fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21593bcb09a3SJeremy Fitzhardinge 	return err;
21601da177e4SLinus Torvalds }
21611da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor);
21621da177e4SLinus Torvalds 
21631da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor)
21641da177e4SLinus Torvalds {
216590e41bacSPrarit Bhargava 	int cpu;
216690e41bacSPrarit Bhargava 
21671da177e4SLinus Torvalds 	if (!governor)
21681da177e4SLinus Torvalds 		return;
21691da177e4SLinus Torvalds 
2170a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2171a7b422cdSKonrad Rzeszutek Wilk 		return;
2172a7b422cdSKonrad Rzeszutek Wilk 
217390e41bacSPrarit Bhargava 	for_each_present_cpu(cpu) {
217490e41bacSPrarit Bhargava 		if (cpu_online(cpu))
217590e41bacSPrarit Bhargava 			continue;
217690e41bacSPrarit Bhargava 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
217790e41bacSPrarit Bhargava 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
217890e41bacSPrarit Bhargava 	}
217990e41bacSPrarit Bhargava 
21803fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21811da177e4SLinus Torvalds 	list_del(&governor->governor_list);
21823fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21831da177e4SLinus Torvalds 	return;
21841da177e4SLinus Torvalds }
21851da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
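/*
 * Illustrative sketch (not part of this file): a governor module would
 * typically wrap cpufreq_register_governor()/cpufreq_unregister_governor()
 * in its module init/exit paths. The "example" names below are assumptions
 * made for this sketch only; the callback here is a do-nothing governor.
 *
 *	static int cpufreq_governor_example(struct cpufreq_policy *policy,
 *					    unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_POLICY_INIT:
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *		case CPUFREQ_GOV_STOP:
 *		case CPUFREQ_GOV_POLICY_EXIT:
 *			return 0;
 *		}
 *		return -EINVAL;
 *	}
 *
 *	static struct cpufreq_governor cpufreq_gov_example = {
 *		.name			= "example",
 *		.governor		= cpufreq_governor_example,
 *		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
 *		.owner			= THIS_MODULE,
 *	};
 *
 *	static int __init cpufreq_gov_example_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_example);
 *	}
 *
 *	static void __exit cpufreq_gov_example_exit(void)
 *	{
 *		cpufreq_unregister_governor(&cpufreq_gov_example);
 *	}
 *	module_init(cpufreq_gov_example_init);
 *	module_exit(cpufreq_gov_example_exit);
 */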
21861da177e4SLinus Torvalds 
21871da177e4SLinus Torvalds 
21881da177e4SLinus Torvalds /*********************************************************************
21891da177e4SLinus Torvalds  *                          POLICY INTERFACE                         *
21901da177e4SLinus Torvalds  *********************************************************************/
21911da177e4SLinus Torvalds 
21921da177e4SLinus Torvalds /**
21931da177e4SLinus Torvalds  * cpufreq_get_policy - get the current cpufreq_policy
219429464f28SDave Jones  * @policy: struct cpufreq_policy into which the current cpufreq_policy
219629464f28SDave Jones  *	is written
 * @cpu: CPU whose current policy is requested
21961da177e4SLinus Torvalds  *
21971da177e4SLinus Torvalds  * Reads the current cpufreq policy.
21981da177e4SLinus Torvalds  */
21991da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
22001da177e4SLinus Torvalds {
22011da177e4SLinus Torvalds 	struct cpufreq_policy *cpu_policy;
22021da177e4SLinus Torvalds 	if (!policy)
22031da177e4SLinus Torvalds 		return -EINVAL;
22041da177e4SLinus Torvalds 
22051da177e4SLinus Torvalds 	cpu_policy = cpufreq_cpu_get(cpu);
22061da177e4SLinus Torvalds 	if (!cpu_policy)
22071da177e4SLinus Torvalds 		return -EINVAL;
22081da177e4SLinus Torvalds 
2209d5b73cd8SViresh Kumar 	memcpy(policy, cpu_policy, sizeof(*policy));
22101da177e4SLinus Torvalds 
22111da177e4SLinus Torvalds 	cpufreq_cpu_put(cpu_policy);
22121da177e4SLinus Torvalds 	return 0;
22131da177e4SLinus Torvalds }
22141da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy);
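/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * consumer that only needs a snapshot of the limits can copy the policy
 * onto its own stack via cpufreq_get_policy() instead of holding a
 * reference obtained with cpufreq_cpu_get():
 *
 *	struct cpufreq_policy snapshot;
 *
 *	if (!cpufreq_get_policy(&snapshot, cpu))
 *		pr_info("cpu%u: %u - %u kHz\n",
 *			cpu, snapshot.min, snapshot.max);
 */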
22151da177e4SLinus Torvalds 
2216153d7f3fSArjan van de Ven /*
2217037ce839SViresh Kumar  * policy: current policy.
2218037ce839SViresh Kumar  * new_policy: policy to be set.
2219153d7f3fSArjan van de Ven  */
2220037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
22213a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy)
22221da177e4SLinus Torvalds {
2223d9a789c7SRafael J. Wysocki 	struct cpufreq_governor *old_gov;
2224d9a789c7SRafael J. Wysocki 	int ret;
22251da177e4SLinus Torvalds 
2226e837f9b5SJoe Perches 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2227e837f9b5SJoe Perches 		 new_policy->cpu, new_policy->min, new_policy->max);
22281da177e4SLinus Torvalds 
2229d5b73cd8SViresh Kumar 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
22301da177e4SLinus Torvalds 
2231d9a789c7SRafael J. Wysocki 	if (new_policy->min > policy->max || new_policy->max < policy->min)
2232d9a789c7SRafael J. Wysocki 		return -EINVAL;
22339c9a43edSMattia Dongili 
22341da177e4SLinus Torvalds 	/* verify the cpu speed can be set within this limit */
22353a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
22361da177e4SLinus Torvalds 	if (ret)
2237d9a789c7SRafael J. Wysocki 		return ret;
22381da177e4SLinus Torvalds 
22391da177e4SLinus Torvalds 	/* adjust if necessary - all reasons */
2240e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22413a3e9e06SViresh Kumar 			CPUFREQ_ADJUST, new_policy);
22421da177e4SLinus Torvalds 
22431da177e4SLinus Torvalds 	/* adjust if necessary - hardware incompatibility*/
2244e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22453a3e9e06SViresh Kumar 			CPUFREQ_INCOMPATIBLE, new_policy);
22461da177e4SLinus Torvalds 
2247bb176f7dSViresh Kumar 	/*
2248bb176f7dSViresh Kumar 	 * verify the cpu speed can be set within this limit, which might be
2249bb176f7dSViresh Kumar 	 * different to the first one
2250bb176f7dSViresh Kumar 	 */
22513a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
2252e041c683SAlan Stern 	if (ret)
2253d9a789c7SRafael J. Wysocki 		return ret;
22541da177e4SLinus Torvalds 
22551da177e4SLinus Torvalds 	/* notification of the new policy */
2256e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22573a3e9e06SViresh Kumar 			CPUFREQ_NOTIFY, new_policy);
22581da177e4SLinus Torvalds 
22593a3e9e06SViresh Kumar 	policy->min = new_policy->min;
22603a3e9e06SViresh Kumar 	policy->max = new_policy->max;
22611da177e4SLinus Torvalds 
22622d06d8c4SDominik Brodowski 	pr_debug("new min and max freqs are %u - %u kHz\n",
22633a3e9e06SViresh Kumar 		 policy->min, policy->max);
22641da177e4SLinus Torvalds 
22651c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
22663a3e9e06SViresh Kumar 		policy->policy = new_policy->policy;
22672d06d8c4SDominik Brodowski 		pr_debug("setting range\n");
2268d9a789c7SRafael J. Wysocki 		return cpufreq_driver->setpolicy(new_policy);
2269d9a789c7SRafael J. Wysocki 	}
2270d9a789c7SRafael J. Wysocki 
2271d9a789c7SRafael J. Wysocki 	if (new_policy->governor == policy->governor)
2272d9a789c7SRafael J. Wysocki 		goto out;
22731da177e4SLinus Torvalds 
22742d06d8c4SDominik Brodowski 	pr_debug("governor switch\n");
22751da177e4SLinus Torvalds 
2276d9a789c7SRafael J. Wysocki 	/* save old, working values */
2277d9a789c7SRafael J. Wysocki 	old_gov = policy->governor;
22781da177e4SLinus Torvalds 	/* end old governor */
2279d9a789c7SRafael J. Wysocki 	if (old_gov) {
22803a3e9e06SViresh Kumar 		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2281ad7722daSviresh kumar 		up_write(&policy->rwsem);
2282d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2283ad7722daSviresh kumar 		down_write(&policy->rwsem);
22847bd353a9SViresh Kumar 	}
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds 	/* start new governor */
22873a3e9e06SViresh Kumar 	policy->governor = new_policy->governor;
22883a3e9e06SViresh Kumar 	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2289d9a789c7SRafael J. Wysocki 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2290d9a789c7SRafael J. Wysocki 			goto out;
2291d9a789c7SRafael J. Wysocki 
2292ad7722daSviresh kumar 		up_write(&policy->rwsem);
2293d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2294ad7722daSviresh kumar 		down_write(&policy->rwsem);
2295955ef483SViresh Kumar 	}
22967bd353a9SViresh Kumar 
22971da177e4SLinus Torvalds 	/* new governor failed, so re-start old one */
2298d9a789c7SRafael J. Wysocki 	pr_debug("starting governor %s failed\n", policy->governor->name);
22991da177e4SLinus Torvalds 	if (old_gov) {
23003a3e9e06SViresh Kumar 		policy->governor = old_gov;
2301d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2302d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_START);
23031da177e4SLinus Torvalds 	}
23041da177e4SLinus Torvalds 
2305d9a789c7SRafael J. Wysocki 	return -EINVAL;
2306d9a789c7SRafael J. Wysocki 
2307d9a789c7SRafael J. Wysocki  out:
2308d9a789c7SRafael J. Wysocki 	pr_debug("governor: change or update limits\n");
2309d9a789c7SRafael J. Wysocki 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
23101da177e4SLinus Torvalds }
23111da177e4SLinus Torvalds 
23121da177e4SLinus Torvalds /**
23131da177e4SLinus Torvalds  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
23141da177e4SLinus Torvalds  *	@cpu: CPU which shall be re-evaluated
23151da177e4SLinus Torvalds  *
231625985edcSLucas De Marchi  *	Useful for policy notifiers which have different requirements
23171da177e4SLinus Torvalds  *	at different times.
23181da177e4SLinus Torvalds  */
23191da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu)
23201da177e4SLinus Torvalds {
23213a3e9e06SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
23223a3e9e06SViresh Kumar 	struct cpufreq_policy new_policy;
2323f1829e4aSJulia Lawall 	int ret;
23241da177e4SLinus Torvalds 
2325fefa8ff8SAaron Plattner 	if (!policy)
2326fefa8ff8SAaron Plattner 		return -ENODEV;
23271da177e4SLinus Torvalds 
2328ad7722daSviresh kumar 	down_write(&policy->rwsem);
23291da177e4SLinus Torvalds 
23302d06d8c4SDominik Brodowski 	pr_debug("updating policy for CPU %u\n", cpu);
2331d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
23323a3e9e06SViresh Kumar 	new_policy.min = policy->user_policy.min;
23333a3e9e06SViresh Kumar 	new_policy.max = policy->user_policy.max;
23343a3e9e06SViresh Kumar 	new_policy.policy = policy->user_policy.policy;
23353a3e9e06SViresh Kumar 	new_policy.governor = policy->user_policy.governor;
23361da177e4SLinus Torvalds 
2337bb176f7dSViresh Kumar 	/*
2338bb176f7dSViresh Kumar 	 * BIOS might change freq behind our back
2339bb176f7dSViresh Kumar 	 * -> ask driver for current freq and notify governors about a change
2340bb176f7dSViresh Kumar 	 */
23412ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
23423a3e9e06SViresh Kumar 		new_policy.cur = cpufreq_driver->get(cpu);
2343bd0fa9bbSViresh Kumar 		if (WARN_ON(!new_policy.cur)) {
2344bd0fa9bbSViresh Kumar 			ret = -EIO;
2345fefa8ff8SAaron Plattner 			goto unlock;
2346bd0fa9bbSViresh Kumar 		}
2347bd0fa9bbSViresh Kumar 
23483a3e9e06SViresh Kumar 		if (!policy->cur) {
2349e837f9b5SJoe Perches 			pr_debug("Driver did not initialize current freq\n");
23503a3e9e06SViresh Kumar 			policy->cur = new_policy.cur;
2351a85f7bd3SThomas Renninger 		} else {
23529c0ebcf7SViresh Kumar 			if (policy->cur != new_policy.cur && has_target())
2353a1e1dc41SViresh Kumar 				cpufreq_out_of_sync(policy, new_policy.cur);
23540961dd0dSThomas Renninger 		}
2355a85f7bd3SThomas Renninger 	}
23560961dd0dSThomas Renninger 
2357037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
23581da177e4SLinus Torvalds 
2359fefa8ff8SAaron Plattner unlock:
2360ad7722daSviresh kumar 	up_write(&policy->rwsem);
23615a01f2e8SVenkatesh Pallipadi 
23623a3e9e06SViresh Kumar 	cpufreq_cpu_put(policy);
23631da177e4SLinus Torvalds 	return ret;
23641da177e4SLinus Torvalds }
23651da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy);
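/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * a typical user of cpufreq_update_policy() is a CPUFREQ_POLICY_NOTIFIER
 * that clamps the limits in CPUFREQ_ADJUST and asks for a re-evaluation
 * whenever its constraint changes. foo_max_freq_limit and the foo_* names
 * are assumptions made for this sketch.
 *
 *	static int foo_policy_notifier(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct cpufreq_policy *policy = data;
 *
 *		if (event != CPUFREQ_ADJUST)
 *			return NOTIFY_DONE;
 *
 *		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
 *					     foo_max_freq_limit);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_policy_nb = {
 *		.notifier_call = foo_policy_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&foo_policy_nb, CPUFREQ_POLICY_NOTIFIER);
 *	...
 *	foo_max_freq_limit = new_limit;
 *	cpufreq_update_policy(cpu);
 */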
23661da177e4SLinus Torvalds 
23672760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb,
2368c32b6b8eSAshok Raj 					unsigned long action, void *hcpu)
2369c32b6b8eSAshok Raj {
2370c32b6b8eSAshok Raj 	unsigned int cpu = (unsigned long)hcpu;
23718a25a2fdSKay Sievers 	struct device *dev;
2372c32b6b8eSAshok Raj 
23738a25a2fdSKay Sievers 	dev = get_cpu_device(cpu);
23748a25a2fdSKay Sievers 	if (dev) {
23755302c3fbSSrivatsa S. Bhat 		switch (action & ~CPU_TASKS_FROZEN) {
2376c32b6b8eSAshok Raj 		case CPU_ONLINE:
237723faf0b7SViresh Kumar 			cpufreq_add_dev(dev, NULL);
2378c32b6b8eSAshok Raj 			break;
23795302c3fbSSrivatsa S. Bhat 
2380c32b6b8eSAshok Raj 		case CPU_DOWN_PREPARE:
238196bbbe4aSViresh Kumar 			__cpufreq_remove_dev_prepare(dev, NULL);
23821aee40acSSrivatsa S. Bhat 			break;
23831aee40acSSrivatsa S. Bhat 
23841aee40acSSrivatsa S. Bhat 		case CPU_POST_DEAD:
238596bbbe4aSViresh Kumar 			__cpufreq_remove_dev_finish(dev, NULL);
2386c32b6b8eSAshok Raj 			break;
23875302c3fbSSrivatsa S. Bhat 
23885a01f2e8SVenkatesh Pallipadi 		case CPU_DOWN_FAILED:
238923faf0b7SViresh Kumar 			cpufreq_add_dev(dev, NULL);
2390c32b6b8eSAshok Raj 			break;
2391c32b6b8eSAshok Raj 		}
2392c32b6b8eSAshok Raj 	}
2393c32b6b8eSAshok Raj 	return NOTIFY_OK;
2394c32b6b8eSAshok Raj }
2395c32b6b8eSAshok Raj 
23969c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = {
2397c32b6b8eSAshok Raj 	.notifier_call = cpufreq_cpu_callback,
2398c32b6b8eSAshok Raj };
23991da177e4SLinus Torvalds 
24001da177e4SLinus Torvalds /*********************************************************************
24016f19efc0SLukasz Majewski  *               BOOST						     *
24026f19efc0SLukasz Majewski  *********************************************************************/
24036f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state)
24046f19efc0SLukasz Majewski {
24056f19efc0SLukasz Majewski 	struct cpufreq_frequency_table *freq_table;
24066f19efc0SLukasz Majewski 	struct cpufreq_policy *policy;
24076f19efc0SLukasz Majewski 	int ret = -EINVAL;
24086f19efc0SLukasz Majewski 
2409*f963735aSViresh Kumar 	for_each_active_policy(policy) {
24106f19efc0SLukasz Majewski 		freq_table = cpufreq_frequency_get_table(policy->cpu);
24116f19efc0SLukasz Majewski 		if (freq_table) {
24126f19efc0SLukasz Majewski 			ret = cpufreq_frequency_table_cpuinfo(policy,
24136f19efc0SLukasz Majewski 							freq_table);
24146f19efc0SLukasz Majewski 			if (ret) {
24156f19efc0SLukasz Majewski 				pr_err("%s: Policy frequency update failed\n",
24166f19efc0SLukasz Majewski 				       __func__);
24176f19efc0SLukasz Majewski 				break;
24186f19efc0SLukasz Majewski 			}
24196f19efc0SLukasz Majewski 			policy->user_policy.max = policy->max;
24206f19efc0SLukasz Majewski 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
24216f19efc0SLukasz Majewski 		}
24226f19efc0SLukasz Majewski 	}
24236f19efc0SLukasz Majewski 
24246f19efc0SLukasz Majewski 	return ret;
24256f19efc0SLukasz Majewski }
24266f19efc0SLukasz Majewski 
24276f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state)
24286f19efc0SLukasz Majewski {
24296f19efc0SLukasz Majewski 	unsigned long flags;
24306f19efc0SLukasz Majewski 	int ret = 0;
24316f19efc0SLukasz Majewski 
24326f19efc0SLukasz Majewski 	if (cpufreq_driver->boost_enabled == state)
24336f19efc0SLukasz Majewski 		return 0;
24346f19efc0SLukasz Majewski 
24356f19efc0SLukasz Majewski 	write_lock_irqsave(&cpufreq_driver_lock, flags);
24366f19efc0SLukasz Majewski 	cpufreq_driver->boost_enabled = state;
24376f19efc0SLukasz Majewski 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24386f19efc0SLukasz Majewski 
24396f19efc0SLukasz Majewski 	ret = cpufreq_driver->set_boost(state);
24406f19efc0SLukasz Majewski 	if (ret) {
24416f19efc0SLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
24426f19efc0SLukasz Majewski 		cpufreq_driver->boost_enabled = !state;
24436f19efc0SLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24446f19efc0SLukasz Majewski 
2445e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST\n",
2446e837f9b5SJoe Perches 		       __func__, state ? "enable" : "disable");
24476f19efc0SLukasz Majewski 	}
24486f19efc0SLukasz Majewski 
24496f19efc0SLukasz Majewski 	return ret;
24506f19efc0SLukasz Majewski }
24516f19efc0SLukasz Majewski 
24526f19efc0SLukasz Majewski int cpufreq_boost_supported(void)
24536f19efc0SLukasz Majewski {
24546f19efc0SLukasz Majewski 	if (likely(cpufreq_driver))
24556f19efc0SLukasz Majewski 		return cpufreq_driver->boost_supported;
24566f19efc0SLukasz Majewski 
24576f19efc0SLukasz Majewski 	return 0;
24586f19efc0SLukasz Majewski }
24596f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
24606f19efc0SLukasz Majewski 
24616f19efc0SLukasz Majewski int cpufreq_boost_enabled(void)
24626f19efc0SLukasz Majewski {
24636f19efc0SLukasz Majewski 	return cpufreq_driver->boost_enabled;
24646f19efc0SLukasz Majewski }
24656f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
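/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * opts into the boost framework by setting boost_supported before
 * registration; if it leaves set_boost NULL, cpufreq_register_driver()
 * falls back to cpufreq_boost_set_sw(). The foo_* names are assumptions.
 *
 *	if (foo_hw_has_boost_frequencies())
 *		foo_cpufreq_driver.boost_supported = true;
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */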
24666f19efc0SLukasz Majewski 
24676f19efc0SLukasz Majewski /*********************************************************************
24681da177e4SLinus Torvalds  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
24691da177e4SLinus Torvalds  *********************************************************************/
24701da177e4SLinus Torvalds 
24711da177e4SLinus Torvalds /**
24721da177e4SLinus Torvalds  * cpufreq_register_driver - register a CPU Frequency driver
24731da177e4SLinus Torvalds  * @driver_data: A struct cpufreq_driver containing the values
24741da177e4SLinus Torvalds  * submitted by the CPU Frequency driver.
24751da177e4SLinus Torvalds  *
24761da177e4SLinus Torvalds  * Registers a CPU Frequency driver to this core code. This code
24771da177e4SLinus Torvalds  * returns zero on success, -EBUSY when another driver got here first
24781da177e4SLinus Torvalds  * (and isn't unregistered in the meantime).
24791da177e4SLinus Torvalds  *
24801da177e4SLinus Torvalds  */
2481221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data)
24821da177e4SLinus Torvalds {
24831da177e4SLinus Torvalds 	unsigned long flags;
24841da177e4SLinus Torvalds 	int ret;
24851da177e4SLinus Torvalds 
2486a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2487a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2488a7b422cdSKonrad Rzeszutek Wilk 
24891da177e4SLinus Torvalds 	if (!driver_data || !driver_data->verify || !driver_data->init ||
24909c0ebcf7SViresh Kumar 	    !(driver_data->setpolicy || driver_data->target_index ||
24919832235fSRafael J. Wysocki 		    driver_data->target) ||
24929832235fSRafael J. Wysocki 	     (driver_data->setpolicy && (driver_data->target_index ||
24931c03a2d0SViresh Kumar 		    driver_data->target)) ||
24941c03a2d0SViresh Kumar 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
24951da177e4SLinus Torvalds 		return -EINVAL;
24961da177e4SLinus Torvalds 
24972d06d8c4SDominik Brodowski 	pr_debug("trying to register driver %s\n", driver_data->name);
24981da177e4SLinus Torvalds 
24990d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25001c3d85ddSRafael J. Wysocki 	if (cpufreq_driver) {
25010d1857a1SNathan Zimmer 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25024dea5806SYinghai Lu 		return -EEXIST;
25031da177e4SLinus Torvalds 	}
25041c3d85ddSRafael J. Wysocki 	cpufreq_driver = driver_data;
25050d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25061da177e4SLinus Torvalds 
2507bc68b7dfSViresh Kumar 	if (driver_data->setpolicy)
2508bc68b7dfSViresh Kumar 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2509bc68b7dfSViresh Kumar 
25106f19efc0SLukasz Majewski 	if (cpufreq_boost_supported()) {
25116f19efc0SLukasz Majewski 		/*
25126f19efc0SLukasz Majewski 		 * Check if driver provides function to enable boost -
25136f19efc0SLukasz Majewski 		 * if not, use cpufreq_boost_set_sw as default
25146f19efc0SLukasz Majewski 		 */
25156f19efc0SLukasz Majewski 		if (!cpufreq_driver->set_boost)
25166f19efc0SLukasz Majewski 			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
25176f19efc0SLukasz Majewski 
25186f19efc0SLukasz Majewski 		ret = cpufreq_sysfs_create_file(&boost.attr);
25196f19efc0SLukasz Majewski 		if (ret) {
25206f19efc0SLukasz Majewski 			pr_err("%s: cannot register global BOOST sysfs file\n",
25216f19efc0SLukasz Majewski 			       __func__);
25226f19efc0SLukasz Majewski 			goto err_null_driver;
25236f19efc0SLukasz Majewski 		}
25246f19efc0SLukasz Majewski 	}
25256f19efc0SLukasz Majewski 
25268a25a2fdSKay Sievers 	ret = subsys_interface_register(&cpufreq_interface);
25278f5bc2abSJiri Slaby 	if (ret)
25286f19efc0SLukasz Majewski 		goto err_boost_unreg;
25291da177e4SLinus Torvalds 
2530ce1bcfe9SViresh Kumar 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2531ce1bcfe9SViresh Kumar 	    list_empty(&cpufreq_policy_list)) {
25321da177e4SLinus Torvalds 		/* if all ->init() calls failed, unregister */
2533ce1bcfe9SViresh Kumar 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2534e08f5f5bSGautham R Shenoy 			 driver_data->name);
25358a25a2fdSKay Sievers 		goto err_if_unreg;
25361da177e4SLinus Torvalds 	}
25371da177e4SLinus Torvalds 
253865edc68cSChandra Seetharaman 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
25392d06d8c4SDominik Brodowski 	pr_debug("driver %s up and running\n", driver_data->name);
25401da177e4SLinus Torvalds 
25418f5bc2abSJiri Slaby 	return 0;
25428a25a2fdSKay Sievers err_if_unreg:
25438a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25446f19efc0SLukasz Majewski err_boost_unreg:
25456f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25466f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25478f5bc2abSJiri Slaby err_null_driver:
25480d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25491c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25500d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25514d34a67dSDave Jones 	return ret;
25521da177e4SLinus Torvalds }
25531da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver);
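/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * frequency-table based driver that satisfies the checks above needs at
 * least init(), verify() and one of setpolicy()/target()/target_index().
 * The foo_* symbols are assumptions made for this sketch; the generic
 * helpers are the ones provided by the cpufreq core.
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.flags		= CPUFREQ_STICKY,
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init foo_cpufreq_module_init(void)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 *	module_init(foo_cpufreq_module_init);
 */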
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds /**
25561da177e4SLinus Torvalds  * cpufreq_unregister_driver - unregister the current CPUFreq driver
25571da177e4SLinus Torvalds  *
25581da177e4SLinus Torvalds  * Unregister the current CPUFreq driver. Only call this if you have
25591da177e4SLinus Torvalds  * the right to do so, i.e. if you have succeeded in initialising before!
25601da177e4SLinus Torvalds  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
25611da177e4SLinus Torvalds  * currently not initialised.
25621da177e4SLinus Torvalds  */
2563221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver)
25641da177e4SLinus Torvalds {
25651da177e4SLinus Torvalds 	unsigned long flags;
25661da177e4SLinus Torvalds 
25671c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver || (driver != cpufreq_driver))
25681da177e4SLinus Torvalds 		return -EINVAL;
25691da177e4SLinus Torvalds 
25702d06d8c4SDominik Brodowski 	pr_debug("unregistering driver %s\n", driver->name);
25711da177e4SLinus Torvalds 
25728a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25736f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25746f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25756f19efc0SLukasz Majewski 
257665edc68cSChandra Seetharaman 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
25771da177e4SLinus Torvalds 
25786eed9404SViresh Kumar 	down_write(&cpufreq_rwsem);
25790d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25806eed9404SViresh Kumar 
25811c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25826eed9404SViresh Kumar 
25830d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25846eed9404SViresh Kumar 	up_write(&cpufreq_rwsem);
25851da177e4SLinus Torvalds 
25861da177e4SLinus Torvalds 	return 0;
25871da177e4SLinus Torvalds }
25881da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
25895a01f2e8SVenkatesh Pallipadi 
259090de2a4aSDoug Anderson /*
259190de2a4aSDoug Anderson  * Stop cpufreq at shutdown to make sure it isn't holding any locks
259290de2a4aSDoug Anderson  * or mutexes when secondary CPUs are halted.
259390de2a4aSDoug Anderson  */
259490de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = {
259590de2a4aSDoug Anderson 	.shutdown = cpufreq_suspend,
259690de2a4aSDoug Anderson };
259790de2a4aSDoug Anderson 
25985a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void)
25995a01f2e8SVenkatesh Pallipadi {
2600a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2601a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2602a7b422cdSKonrad Rzeszutek Wilk 
26032361be23SViresh Kumar 	cpufreq_global_kobject = kobject_create();
26048aa84ad8SThomas Renninger 	BUG_ON(!cpufreq_global_kobject);
26058aa84ad8SThomas Renninger 
260690de2a4aSDoug Anderson 	register_syscore_ops(&cpufreq_syscore_ops);
260790de2a4aSDoug Anderson 
26085a01f2e8SVenkatesh Pallipadi 	return 0;
26095a01f2e8SVenkatesh Pallipadi }
26105a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init);
2611