xref: /openbmc/linux/drivers/cpufreq/cpufreq.c (revision 9591becbf226e3aa0f6c73494736e2c5ab14cc8d)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/drivers/cpufreq/cpufreq.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 2001 Russell King
51da177e4SLinus Torvalds  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6bb176f7dSViresh Kumar  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
71da177e4SLinus Torvalds  *
8c32b6b8eSAshok Raj  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9c32b6b8eSAshok Raj  *	Added handling for CPU hotplug
108ff69732SDave Jones  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
118ff69732SDave Jones  *	Fix handling for CPU hotplug -- affected CPUs
12c32b6b8eSAshok Raj  *
131da177e4SLinus Torvalds  * This program is free software; you can redistribute it and/or modify
141da177e4SLinus Torvalds  * it under the terms of the GNU General Public License version 2 as
151da177e4SLinus Torvalds  * published by the Free Software Foundation.
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19db701151SViresh Kumar 
205ff0a268SViresh Kumar #include <linux/cpu.h>
211da177e4SLinus Torvalds #include <linux/cpufreq.h>
221da177e4SLinus Torvalds #include <linux/delay.h>
231da177e4SLinus Torvalds #include <linux/device.h>
245ff0a268SViresh Kumar #include <linux/init.h>
255ff0a268SViresh Kumar #include <linux/kernel_stat.h>
265ff0a268SViresh Kumar #include <linux/module.h>
273fc54d37Sakpm@osdl.org #include <linux/mutex.h>
285ff0a268SViresh Kumar #include <linux/slab.h>
292f0aea93SViresh Kumar #include <linux/suspend.h>
3090de2a4aSDoug Anderson #include <linux/syscore_ops.h>
315ff0a268SViresh Kumar #include <linux/tick.h>
326f4f2723SThomas Renninger #include <trace/events/power.h>
336f4f2723SThomas Renninger 
34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list);
35f963735aSViresh Kumar 
36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37f963735aSViresh Kumar {
38f963735aSViresh Kumar 	return cpumask_empty(policy->cpus);
39f963735aSViresh Kumar }
40f963735aSViresh Kumar 
41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42f963735aSViresh Kumar {
43f963735aSViresh Kumar 	return active == !policy_is_inactive(policy);
44f963735aSViresh Kumar }
45f963735aSViresh Kumar 
46f963735aSViresh Kumar /* Finds Next Acive/Inactive policy */
47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48f963735aSViresh Kumar 					  bool active)
49f963735aSViresh Kumar {
50f963735aSViresh Kumar 	do {
51f963735aSViresh Kumar 		policy = list_next_entry(policy, policy_list);
52f963735aSViresh Kumar 
53f963735aSViresh Kumar 		/* No more policies in the list */
54f963735aSViresh Kumar 		if (&policy->policy_list == &cpufreq_policy_list)
55f963735aSViresh Kumar 			return NULL;
56f963735aSViresh Kumar 	} while (!suitable_policy(policy, active));
57f963735aSViresh Kumar 
58f963735aSViresh Kumar 	return policy;
59f963735aSViresh Kumar }
60f963735aSViresh Kumar 
61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active)
62f963735aSViresh Kumar {
63f963735aSViresh Kumar 	struct cpufreq_policy *policy;
64f963735aSViresh Kumar 
65f963735aSViresh Kumar 	/* No policies in the list */
66f963735aSViresh Kumar 	if (list_empty(&cpufreq_policy_list))
67f963735aSViresh Kumar 		return NULL;
68f963735aSViresh Kumar 
69f963735aSViresh Kumar 	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70f963735aSViresh Kumar 				  policy_list);
71f963735aSViresh Kumar 
72f963735aSViresh Kumar 	if (!suitable_policy(policy, active))
73f963735aSViresh Kumar 		policy = next_policy(policy, active);
74f963735aSViresh Kumar 
75f963735aSViresh Kumar 	return policy;
76f963735aSViresh Kumar }
77f963735aSViresh Kumar 
78f963735aSViresh Kumar /* Macros to iterate over CPU policies */
79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active)	\
80f963735aSViresh Kumar 	for (__policy = first_policy(__active);		\
81f963735aSViresh Kumar 	     __policy;					\
82f963735aSViresh Kumar 	     __policy = next_policy(__policy, __active))
83f963735aSViresh Kumar 
84f963735aSViresh Kumar #define for_each_active_policy(__policy)		\
85f963735aSViresh Kumar 	for_each_suitable_policy(__policy, true)
86f963735aSViresh Kumar #define for_each_inactive_policy(__policy)		\
87f963735aSViresh Kumar 	for_each_suitable_policy(__policy, false)
88f963735aSViresh Kumar 
89b4f0676fSViresh Kumar #define for_each_policy(__policy)			\
90b4f0676fSViresh Kumar 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91b4f0676fSViresh Kumar 
92f7b27061SViresh Kumar /* Iterate over governors */
93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list);
94f7b27061SViresh Kumar #define for_each_governor(__governor)				\
95f7b27061SViresh Kumar 	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96f7b27061SViresh Kumar 
971da177e4SLinus Torvalds /**
98cd878479SDave Jones  * The "cpufreq driver" - the arch- or hardware-dependent low
991da177e4SLinus Torvalds  * level driver of CPUFreq support, and its spinlock. This lock
1001da177e4SLinus Torvalds  * also protects the cpufreq_cpu_data array.
1011da177e4SLinus Torvalds  */
1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver;
1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock);
1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock);
106bb176f7dSViresh Kumar 
1072f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */
1082f0aea93SViresh Kumar static bool cpufreq_suspended;
1091da177e4SLinus Torvalds 
1109c0ebcf7SViresh Kumar static inline bool has_target(void)
1119c0ebcf7SViresh Kumar {
1129c0ebcf7SViresh Kumar 	return cpufreq_driver->target_index || cpufreq_driver->target;
1139c0ebcf7SViresh Kumar }
1149c0ebcf7SViresh Kumar 
1155a01f2e8SVenkatesh Pallipadi /*
1166eed9404SViresh Kumar  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
1176eed9404SViresh Kumar  * sections
1186eed9404SViresh Kumar  */
1196eed9404SViresh Kumar static DECLARE_RWSEM(cpufreq_rwsem);
1206eed9404SViresh Kumar 
1211da177e4SLinus Torvalds /* internal prototypes */
12229464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy,
12329464f28SDave Jones 		unsigned int event);
124d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
12565f27f38SDavid Howells static void handle_update(struct work_struct *work);
1261da177e4SLinus Torvalds 
1271da177e4SLinus Torvalds /**
1281da177e4SLinus Torvalds  * Two notifier lists: the "policy" list is involved in the
1291da177e4SLinus Torvalds  * validation process for a new CPU frequency policy; the
1301da177e4SLinus Torvalds  * "transition" list for kernel code that needs to handle
1311da177e4SLinus Torvalds  * changes to devices when the CPU clock speed changes.
1321da177e4SLinus Torvalds  * The mutex locks both lists.
1331da177e4SLinus Torvalds  */
134e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
135b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list;
1361da177e4SLinus Torvalds 
13774212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called;
138b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void)
139b4dfdbb3SAlan Stern {
140b4dfdbb3SAlan Stern 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
14174212ca4SCesar Eduardo Barros 	init_cpufreq_transition_notifier_list_called = true;
142b4dfdbb3SAlan Stern 	return 0;
143b4dfdbb3SAlan Stern }
144b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list);
1451da177e4SLinus Torvalds 
146a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly;
147da584455SViresh Kumar static int cpufreq_disabled(void)
148a7b422cdSKonrad Rzeszutek Wilk {
149a7b422cdSKonrad Rzeszutek Wilk 	return off;
150a7b422cdSKonrad Rzeszutek Wilk }
151a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void)
152a7b422cdSKonrad Rzeszutek Wilk {
153a7b422cdSKonrad Rzeszutek Wilk 	off = 1;
154a7b422cdSKonrad Rzeszutek Wilk }
1553fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex);
1561da177e4SLinus Torvalds 
1574d5dcc42SViresh Kumar bool have_governor_per_policy(void)
1584d5dcc42SViresh Kumar {
1590b981e70SViresh Kumar 	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
1604d5dcc42SViresh Kumar }
1613f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy);
1624d5dcc42SViresh Kumar 
163944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164944e9a03SViresh Kumar {
165944e9a03SViresh Kumar 	if (have_governor_per_policy())
166944e9a03SViresh Kumar 		return &policy->kobj;
167944e9a03SViresh Kumar 	else
168944e9a03SViresh Kumar 		return cpufreq_global_kobject;
169944e9a03SViresh Kumar }
170944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171944e9a03SViresh Kumar 
17272a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
17372a4ce34SViresh Kumar {
17472a4ce34SViresh Kumar 	u64 idle_time;
17572a4ce34SViresh Kumar 	u64 cur_wall_time;
17672a4ce34SViresh Kumar 	u64 busy_time;
17772a4ce34SViresh Kumar 
17872a4ce34SViresh Kumar 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
17972a4ce34SViresh Kumar 
18072a4ce34SViresh Kumar 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
18172a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
18272a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
18372a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
18472a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
18572a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
18672a4ce34SViresh Kumar 
18772a4ce34SViresh Kumar 	idle_time = cur_wall_time - busy_time;
18872a4ce34SViresh Kumar 	if (wall)
18972a4ce34SViresh Kumar 		*wall = cputime_to_usecs(cur_wall_time);
19072a4ce34SViresh Kumar 
19172a4ce34SViresh Kumar 	return cputime_to_usecs(idle_time);
19272a4ce34SViresh Kumar }
19372a4ce34SViresh Kumar 
19472a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
19572a4ce34SViresh Kumar {
19672a4ce34SViresh Kumar 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
19772a4ce34SViresh Kumar 
19872a4ce34SViresh Kumar 	if (idle_time == -1ULL)
19972a4ce34SViresh Kumar 		return get_cpu_idle_time_jiffy(cpu, wall);
20072a4ce34SViresh Kumar 	else if (!io_busy)
20172a4ce34SViresh Kumar 		idle_time += get_cpu_iowait_time_us(cpu, wall);
20272a4ce34SViresh Kumar 
20372a4ce34SViresh Kumar 	return idle_time;
20472a4ce34SViresh Kumar }
20572a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time);
20672a4ce34SViresh Kumar 
20770e9e778SViresh Kumar /*
20870e9e778SViresh Kumar  * This is a generic cpufreq init() routine which can be used by cpufreq
20970e9e778SViresh Kumar  * drivers of SMP systems. It will do following:
21070e9e778SViresh Kumar  * - validate & show freq table passed
21170e9e778SViresh Kumar  * - set policies transition latency
21270e9e778SViresh Kumar  * - policy->cpus with all possible CPUs
21370e9e778SViresh Kumar  */
21470e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy,
21570e9e778SViresh Kumar 		struct cpufreq_frequency_table *table,
21670e9e778SViresh Kumar 		unsigned int transition_latency)
21770e9e778SViresh Kumar {
21870e9e778SViresh Kumar 	int ret;
21970e9e778SViresh Kumar 
22070e9e778SViresh Kumar 	ret = cpufreq_table_validate_and_show(policy, table);
22170e9e778SViresh Kumar 	if (ret) {
22270e9e778SViresh Kumar 		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
22370e9e778SViresh Kumar 		return ret;
22470e9e778SViresh Kumar 	}
22570e9e778SViresh Kumar 
22670e9e778SViresh Kumar 	policy->cpuinfo.transition_latency = transition_latency;
22770e9e778SViresh Kumar 
22870e9e778SViresh Kumar 	/*
22958405af6SShailendra Verma 	 * The driver only supports the SMP configuration where all processors
23070e9e778SViresh Kumar 	 * share the clock and voltage and clock.
23170e9e778SViresh Kumar 	 */
23270e9e778SViresh Kumar 	cpumask_setall(policy->cpus);
23370e9e778SViresh Kumar 
23470e9e778SViresh Kumar 	return 0;
23570e9e778SViresh Kumar }
23670e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init);
23770e9e778SViresh Kumar 
238988bed09SViresh Kumar /* Only for cpufreq core internal use */
239988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
240652ed95dSViresh Kumar {
241652ed95dSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
242652ed95dSViresh Kumar 
243988bed09SViresh Kumar 	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
244988bed09SViresh Kumar }
245988bed09SViresh Kumar 
246988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu)
247988bed09SViresh Kumar {
248988bed09SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
249988bed09SViresh Kumar 
250652ed95dSViresh Kumar 	if (!policy || IS_ERR(policy->clk)) {
251e837f9b5SJoe Perches 		pr_err("%s: No %s associated to cpu: %d\n",
252e837f9b5SJoe Perches 		       __func__, policy ? "clk" : "policy", cpu);
253652ed95dSViresh Kumar 		return 0;
254652ed95dSViresh Kumar 	}
255652ed95dSViresh Kumar 
256652ed95dSViresh Kumar 	return clk_get_rate(policy->clk) / 1000;
257652ed95dSViresh Kumar }
258652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get);
259652ed95dSViresh Kumar 
26050e9c852SViresh Kumar /**
26150e9c852SViresh Kumar  * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
26250e9c852SViresh Kumar  *
26350e9c852SViresh Kumar  * @cpu: cpu to find policy for.
26450e9c852SViresh Kumar  *
26550e9c852SViresh Kumar  * This returns policy for 'cpu', returns NULL if it doesn't exist.
26650e9c852SViresh Kumar  * It also increments the kobject reference count to mark it busy and so would
26750e9c852SViresh Kumar  * require a corresponding call to cpufreq_cpu_put() to decrement it back.
26850e9c852SViresh Kumar  * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
26950e9c852SViresh Kumar  * freed as that depends on the kobj count.
27050e9c852SViresh Kumar  *
27150e9c852SViresh Kumar  * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
27250e9c852SViresh Kumar  * valid policy is found. This is done to make sure the driver doesn't get
27350e9c852SViresh Kumar  * unregistered while the policy is being used.
27450e9c852SViresh Kumar  *
27550e9c852SViresh Kumar  * Return: A valid policy on success, otherwise NULL on failure.
27650e9c852SViresh Kumar  */
2776eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
2781da177e4SLinus Torvalds {
2796eed9404SViresh Kumar 	struct cpufreq_policy *policy = NULL;
2801da177e4SLinus Torvalds 	unsigned long flags;
2811da177e4SLinus Torvalds 
2821b947c90SViresh Kumar 	if (WARN_ON(cpu >= nr_cpu_ids))
2836eed9404SViresh Kumar 		return NULL;
2846eed9404SViresh Kumar 
2856eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
2866eed9404SViresh Kumar 		return NULL;
2871da177e4SLinus Torvalds 
2881da177e4SLinus Torvalds 	/* get the cpufreq driver */
2890d1857a1SNathan Zimmer 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2901da177e4SLinus Torvalds 
2916eed9404SViresh Kumar 	if (cpufreq_driver) {
2921da177e4SLinus Torvalds 		/* get the CPU */
293988bed09SViresh Kumar 		policy = cpufreq_cpu_get_raw(cpu);
2946eed9404SViresh Kumar 		if (policy)
2956eed9404SViresh Kumar 			kobject_get(&policy->kobj);
2966eed9404SViresh Kumar 	}
2976eed9404SViresh Kumar 
2986eed9404SViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2991da177e4SLinus Torvalds 
3003a3e9e06SViresh Kumar 	if (!policy)
3016eed9404SViresh Kumar 		up_read(&cpufreq_rwsem);
3021da177e4SLinus Torvalds 
3033a3e9e06SViresh Kumar 	return policy;
304a9144436SStephen Boyd }
3051da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
3061da177e4SLinus Torvalds 
30750e9c852SViresh Kumar /**
30850e9c852SViresh Kumar  * cpufreq_cpu_put: Decrements the usage count of a policy
30950e9c852SViresh Kumar  *
31050e9c852SViresh Kumar  * @policy: policy earlier returned by cpufreq_cpu_get().
31150e9c852SViresh Kumar  *
31250e9c852SViresh Kumar  * This decrements the kobject reference count incremented earlier by calling
31350e9c852SViresh Kumar  * cpufreq_cpu_get().
31450e9c852SViresh Kumar  *
31550e9c852SViresh Kumar  * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
31650e9c852SViresh Kumar  */
3173a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy)
318a9144436SStephen Boyd {
3196eed9404SViresh Kumar 	kobject_put(&policy->kobj);
3206eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
321a9144436SStephen Boyd }
3221da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
3231da177e4SLinus Torvalds 
3241da177e4SLinus Torvalds /*********************************************************************
3251da177e4SLinus Torvalds  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
3261da177e4SLinus Torvalds  *********************************************************************/
3271da177e4SLinus Torvalds 
3281da177e4SLinus Torvalds /**
3291da177e4SLinus Torvalds  * adjust_jiffies - adjust the system "loops_per_jiffy"
3301da177e4SLinus Torvalds  *
3311da177e4SLinus Torvalds  * This function alters the system "loops_per_jiffy" for the clock
3321da177e4SLinus Torvalds  * speed change. Note that loops_per_jiffy cannot be updated on SMP
3331da177e4SLinus Torvalds  * systems as each CPU might be scaled differently. So, use the arch
3341da177e4SLinus Torvalds  * per-CPU loops_per_jiffy value wherever possible.
3351da177e4SLinus Torvalds  */
33639c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
33739c132eeSViresh Kumar {
3381da177e4SLinus Torvalds #ifndef CONFIG_SMP
3391da177e4SLinus Torvalds 	static unsigned long l_p_j_ref;
3401da177e4SLinus Torvalds 	static unsigned int l_p_j_ref_freq;
3411da177e4SLinus Torvalds 
3421da177e4SLinus Torvalds 	if (ci->flags & CPUFREQ_CONST_LOOPS)
3431da177e4SLinus Torvalds 		return;
3441da177e4SLinus Torvalds 
3451da177e4SLinus Torvalds 	if (!l_p_j_ref_freq) {
3461da177e4SLinus Torvalds 		l_p_j_ref = loops_per_jiffy;
3471da177e4SLinus Torvalds 		l_p_j_ref_freq = ci->old;
348e837f9b5SJoe Perches 		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
349e837f9b5SJoe Perches 			 l_p_j_ref, l_p_j_ref_freq);
3501da177e4SLinus Torvalds 	}
3510b443eadSViresh Kumar 	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
352e08f5f5bSGautham R Shenoy 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
353e08f5f5bSGautham R Shenoy 								ci->new);
354e837f9b5SJoe Perches 		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
355e837f9b5SJoe Perches 			 loops_per_jiffy, ci->new);
3561da177e4SLinus Torvalds 	}
3571da177e4SLinus Torvalds #endif
35839c132eeSViresh Kumar }
3591da177e4SLinus Torvalds 
3600956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
361b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
3621da177e4SLinus Torvalds {
3631da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
3641da177e4SLinus Torvalds 
365d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
366d5aaffa9SDirk Brandewie 		return;
367d5aaffa9SDirk Brandewie 
3681c3d85ddSRafael J. Wysocki 	freqs->flags = cpufreq_driver->flags;
3692d06d8c4SDominik Brodowski 	pr_debug("notification %u of frequency transition to %u kHz\n",
370e4472cb3SDave Jones 		 state, freqs->new);
3711da177e4SLinus Torvalds 
3721da177e4SLinus Torvalds 	switch (state) {
373e4472cb3SDave Jones 
3741da177e4SLinus Torvalds 	case CPUFREQ_PRECHANGE:
375e4472cb3SDave Jones 		/* detect if the driver reported a value as "old frequency"
376e4472cb3SDave Jones 		 * which is not equal to what the cpufreq core thinks is
377e4472cb3SDave Jones 		 * "old frequency".
3781da177e4SLinus Torvalds 		 */
3791c3d85ddSRafael J. Wysocki 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
380e4472cb3SDave Jones 			if ((policy) && (policy->cpu == freqs->cpu) &&
381e4472cb3SDave Jones 			    (policy->cur) && (policy->cur != freqs->old)) {
382e837f9b5SJoe Perches 				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
383e4472cb3SDave Jones 					 freqs->old, policy->cur);
384e4472cb3SDave Jones 				freqs->old = policy->cur;
3851da177e4SLinus Torvalds 			}
3861da177e4SLinus Torvalds 		}
387b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
388e4472cb3SDave Jones 				CPUFREQ_PRECHANGE, freqs);
3891da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
3901da177e4SLinus Torvalds 		break;
391e4472cb3SDave Jones 
3921da177e4SLinus Torvalds 	case CPUFREQ_POSTCHANGE:
3931da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
394e837f9b5SJoe Perches 		pr_debug("FREQ: %lu - CPU: %lu\n",
395e837f9b5SJoe Perches 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
39625e41933SThomas Renninger 		trace_cpu_frequency(freqs->new, freqs->cpu);
397b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
398e4472cb3SDave Jones 				CPUFREQ_POSTCHANGE, freqs);
399e4472cb3SDave Jones 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
400e4472cb3SDave Jones 			policy->cur = freqs->new;
4011da177e4SLinus Torvalds 		break;
4021da177e4SLinus Torvalds 	}
4031da177e4SLinus Torvalds }
404bb176f7dSViresh Kumar 
405b43a7ffbSViresh Kumar /**
406b43a7ffbSViresh Kumar  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
407b43a7ffbSViresh Kumar  * on frequency transition.
408b43a7ffbSViresh Kumar  *
409b43a7ffbSViresh Kumar  * This function calls the transition notifiers and the "adjust_jiffies"
410b43a7ffbSViresh Kumar  * function. It is called twice on all CPU frequency changes that have
411b43a7ffbSViresh Kumar  * external effects.
412b43a7ffbSViresh Kumar  */
413236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy,
414b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
415b43a7ffbSViresh Kumar {
416b43a7ffbSViresh Kumar 	for_each_cpu(freqs->cpu, policy->cpus)
417b43a7ffbSViresh Kumar 		__cpufreq_notify_transition(policy, freqs, state);
418b43a7ffbSViresh Kumar }
4191da177e4SLinus Torvalds 
420f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */
421236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
422f7ba3b41SViresh Kumar 		struct cpufreq_freqs *freqs, int transition_failed)
423f7ba3b41SViresh Kumar {
424f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
425f7ba3b41SViresh Kumar 	if (!transition_failed)
426f7ba3b41SViresh Kumar 		return;
427f7ba3b41SViresh Kumar 
428f7ba3b41SViresh Kumar 	swap(freqs->old, freqs->new);
429f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
430f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
431f7ba3b41SViresh Kumar }
432f7ba3b41SViresh Kumar 
43312478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
43412478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs)
43512478cf0SSrivatsa S. Bhat {
436ca654dc3SSrivatsa S. Bhat 
437ca654dc3SSrivatsa S. Bhat 	/*
438ca654dc3SSrivatsa S. Bhat 	 * Catch double invocations of _begin() which lead to self-deadlock.
439ca654dc3SSrivatsa S. Bhat 	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
440ca654dc3SSrivatsa S. Bhat 	 * doesn't invoke _begin() on their behalf, and hence the chances of
441ca654dc3SSrivatsa S. Bhat 	 * double invocations are very low. Moreover, there are scenarios
442ca654dc3SSrivatsa S. Bhat 	 * where these checks can emit false-positive warnings in these
443ca654dc3SSrivatsa S. Bhat 	 * drivers; so we avoid that by skipping them altogether.
444ca654dc3SSrivatsa S. Bhat 	 */
445ca654dc3SSrivatsa S. Bhat 	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
446ca654dc3SSrivatsa S. Bhat 				&& current == policy->transition_task);
447ca654dc3SSrivatsa S. Bhat 
44812478cf0SSrivatsa S. Bhat wait:
44912478cf0SSrivatsa S. Bhat 	wait_event(policy->transition_wait, !policy->transition_ongoing);
45012478cf0SSrivatsa S. Bhat 
45112478cf0SSrivatsa S. Bhat 	spin_lock(&policy->transition_lock);
45212478cf0SSrivatsa S. Bhat 
45312478cf0SSrivatsa S. Bhat 	if (unlikely(policy->transition_ongoing)) {
45412478cf0SSrivatsa S. Bhat 		spin_unlock(&policy->transition_lock);
45512478cf0SSrivatsa S. Bhat 		goto wait;
45612478cf0SSrivatsa S. Bhat 	}
45712478cf0SSrivatsa S. Bhat 
45812478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = true;
459ca654dc3SSrivatsa S. Bhat 	policy->transition_task = current;
46012478cf0SSrivatsa S. Bhat 
46112478cf0SSrivatsa S. Bhat 	spin_unlock(&policy->transition_lock);
46212478cf0SSrivatsa S. Bhat 
46312478cf0SSrivatsa S. Bhat 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
46412478cf0SSrivatsa S. Bhat }
46512478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
46612478cf0SSrivatsa S. Bhat 
46712478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
46812478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs, int transition_failed)
46912478cf0SSrivatsa S. Bhat {
47012478cf0SSrivatsa S. Bhat 	if (unlikely(WARN_ON(!policy->transition_ongoing)))
47112478cf0SSrivatsa S. Bhat 		return;
47212478cf0SSrivatsa S. Bhat 
47312478cf0SSrivatsa S. Bhat 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
47412478cf0SSrivatsa S. Bhat 
47512478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = false;
476ca654dc3SSrivatsa S. Bhat 	policy->transition_task = NULL;
47712478cf0SSrivatsa S. Bhat 
47812478cf0SSrivatsa S. Bhat 	wake_up(&policy->transition_wait);
47912478cf0SSrivatsa S. Bhat }
48012478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
48112478cf0SSrivatsa S. Bhat 
4821da177e4SLinus Torvalds 
4831da177e4SLinus Torvalds /*********************************************************************
4841da177e4SLinus Torvalds  *                          SYSFS INTERFACE                          *
4851da177e4SLinus Torvalds  *********************************************************************/
4868a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj,
4876f19efc0SLukasz Majewski 				 struct attribute *attr, char *buf)
4886f19efc0SLukasz Majewski {
4896f19efc0SLukasz Majewski 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
4906f19efc0SLukasz Majewski }
4916f19efc0SLukasz Majewski 
4926f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
4936f19efc0SLukasz Majewski 				  const char *buf, size_t count)
4946f19efc0SLukasz Majewski {
4956f19efc0SLukasz Majewski 	int ret, enable;
4966f19efc0SLukasz Majewski 
4976f19efc0SLukasz Majewski 	ret = sscanf(buf, "%d", &enable);
4986f19efc0SLukasz Majewski 	if (ret != 1 || enable < 0 || enable > 1)
4996f19efc0SLukasz Majewski 		return -EINVAL;
5006f19efc0SLukasz Majewski 
5016f19efc0SLukasz Majewski 	if (cpufreq_boost_trigger_state(enable)) {
502e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST!\n",
503e837f9b5SJoe Perches 		       __func__, enable ? "enable" : "disable");
5046f19efc0SLukasz Majewski 		return -EINVAL;
5056f19efc0SLukasz Majewski 	}
5066f19efc0SLukasz Majewski 
507e837f9b5SJoe Perches 	pr_debug("%s: cpufreq BOOST %s\n",
508e837f9b5SJoe Perches 		 __func__, enable ? "enabled" : "disabled");
5096f19efc0SLukasz Majewski 
5106f19efc0SLukasz Majewski 	return count;
5116f19efc0SLukasz Majewski }
5126f19efc0SLukasz Majewski define_one_global_rw(boost);
5131da177e4SLinus Torvalds 
51442f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor)
5153bcb09a3SJeremy Fitzhardinge {
5163bcb09a3SJeremy Fitzhardinge 	struct cpufreq_governor *t;
5173bcb09a3SJeremy Fitzhardinge 
518f7b27061SViresh Kumar 	for_each_governor(t)
5197c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
5203bcb09a3SJeremy Fitzhardinge 			return t;
5213bcb09a3SJeremy Fitzhardinge 
5223bcb09a3SJeremy Fitzhardinge 	return NULL;
5233bcb09a3SJeremy Fitzhardinge }
5243bcb09a3SJeremy Fitzhardinge 
5251da177e4SLinus Torvalds /**
5261da177e4SLinus Torvalds  * cpufreq_parse_governor - parse a governor string
5271da177e4SLinus Torvalds  */
5281da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
5291da177e4SLinus Torvalds 				struct cpufreq_governor **governor)
5301da177e4SLinus Torvalds {
5313bcb09a3SJeremy Fitzhardinge 	int err = -EINVAL;
5323bcb09a3SJeremy Fitzhardinge 
5331c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver)
5343bcb09a3SJeremy Fitzhardinge 		goto out;
5353bcb09a3SJeremy Fitzhardinge 
5361c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
5377c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
5381da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_PERFORMANCE;
5393bcb09a3SJeremy Fitzhardinge 			err = 0;
5407c4f4539SRasmus Villemoes 		} else if (!strncasecmp(str_governor, "powersave",
541e08f5f5bSGautham R Shenoy 						CPUFREQ_NAME_LEN)) {
5421da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_POWERSAVE;
5433bcb09a3SJeremy Fitzhardinge 			err = 0;
5441da177e4SLinus Torvalds 		}
5452e1cc3a5SViresh Kumar 	} else {
5461da177e4SLinus Torvalds 		struct cpufreq_governor *t;
5473bcb09a3SJeremy Fitzhardinge 
5483fc54d37Sakpm@osdl.org 		mutex_lock(&cpufreq_governor_mutex);
5493bcb09a3SJeremy Fitzhardinge 
55042f91fa1SViresh Kumar 		t = find_governor(str_governor);
5513bcb09a3SJeremy Fitzhardinge 
552ea714970SJeremy Fitzhardinge 		if (t == NULL) {
553ea714970SJeremy Fitzhardinge 			int ret;
554ea714970SJeremy Fitzhardinge 
555ea714970SJeremy Fitzhardinge 			mutex_unlock(&cpufreq_governor_mutex);
5561a8e1463SKees Cook 			ret = request_module("cpufreq_%s", str_governor);
557ea714970SJeremy Fitzhardinge 			mutex_lock(&cpufreq_governor_mutex);
558ea714970SJeremy Fitzhardinge 
559ea714970SJeremy Fitzhardinge 			if (ret == 0)
56042f91fa1SViresh Kumar 				t = find_governor(str_governor);
561ea714970SJeremy Fitzhardinge 		}
562ea714970SJeremy Fitzhardinge 
5633bcb09a3SJeremy Fitzhardinge 		if (t != NULL) {
5641da177e4SLinus Torvalds 			*governor = t;
5653bcb09a3SJeremy Fitzhardinge 			err = 0;
5661da177e4SLinus Torvalds 		}
5673bcb09a3SJeremy Fitzhardinge 
5683bcb09a3SJeremy Fitzhardinge 		mutex_unlock(&cpufreq_governor_mutex);
5691da177e4SLinus Torvalds 	}
5701da177e4SLinus Torvalds out:
5713bcb09a3SJeremy Fitzhardinge 	return err;
5721da177e4SLinus Torvalds }
5731da177e4SLinus Torvalds 
5741da177e4SLinus Torvalds /**
575e08f5f5bSGautham R Shenoy  * cpufreq_per_cpu_attr_read() / show_##file_name() -
576e08f5f5bSGautham R Shenoy  * print out cpufreq information
5771da177e4SLinus Torvalds  *
5781da177e4SLinus Torvalds  * Write out information from cpufreq_driver->policy[cpu]; object must be
5791da177e4SLinus Torvalds  * "unsigned int".
5801da177e4SLinus Torvalds  */
5811da177e4SLinus Torvalds 
5821da177e4SLinus Torvalds #define show_one(file_name, object)			\
5831da177e4SLinus Torvalds static ssize_t show_##file_name				\
5841da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf)		\
5851da177e4SLinus Torvalds {							\
5861da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", policy->object);	\
5871da177e4SLinus Torvalds }
5881da177e4SLinus Torvalds 
5891da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq);
5901da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq);
591ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
5921da177e4SLinus Torvalds show_one(scaling_min_freq, min);
5931da177e4SLinus Torvalds show_one(scaling_max_freq, max);
594c034b02eSDirk Brandewie 
59509347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
596c034b02eSDirk Brandewie {
597c034b02eSDirk Brandewie 	ssize_t ret;
598c034b02eSDirk Brandewie 
599c034b02eSDirk Brandewie 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
600c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
601c034b02eSDirk Brandewie 	else
602c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", policy->cur);
603c034b02eSDirk Brandewie 	return ret;
604c034b02eSDirk Brandewie }
6051da177e4SLinus Torvalds 
606037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
6073a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy);
6087970e08bSThomas Renninger 
6091da177e4SLinus Torvalds /**
6101da177e4SLinus Torvalds  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
6111da177e4SLinus Torvalds  */
6121da177e4SLinus Torvalds #define store_one(file_name, object)			\
6131da177e4SLinus Torvalds static ssize_t store_##file_name					\
6141da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count)		\
6151da177e4SLinus Torvalds {									\
616619c144cSVince Hsu 	int ret, temp;							\
6171da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;				\
6181da177e4SLinus Torvalds 									\
6191da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
6201da177e4SLinus Torvalds 	if (ret)							\
6211da177e4SLinus Torvalds 		return -EINVAL;						\
6221da177e4SLinus Torvalds 									\
6231da177e4SLinus Torvalds 	ret = sscanf(buf, "%u", &new_policy.object);			\
6241da177e4SLinus Torvalds 	if (ret != 1)							\
6251da177e4SLinus Torvalds 		return -EINVAL;						\
6261da177e4SLinus Torvalds 									\
627619c144cSVince Hsu 	temp = new_policy.object;					\
628037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);		\
629619c144cSVince Hsu 	if (!ret)							\
630619c144cSVince Hsu 		policy->user_policy.object = temp;			\
6311da177e4SLinus Torvalds 									\
6321da177e4SLinus Torvalds 	return ret ? ret : count;					\
6331da177e4SLinus Torvalds }
6341da177e4SLinus Torvalds 
6351da177e4SLinus Torvalds store_one(scaling_min_freq, min);
6361da177e4SLinus Torvalds store_one(scaling_max_freq, max);
6371da177e4SLinus Torvalds 
6381da177e4SLinus Torvalds /**
6391da177e4SLinus Torvalds  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
6401da177e4SLinus Torvalds  */
641e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
642e08f5f5bSGautham R Shenoy 					char *buf)
6431da177e4SLinus Torvalds {
644d92d50a4SViresh Kumar 	unsigned int cur_freq = __cpufreq_get(policy);
6451da177e4SLinus Torvalds 	if (!cur_freq)
6461da177e4SLinus Torvalds 		return sprintf(buf, "<unknown>");
6471da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", cur_freq);
6481da177e4SLinus Torvalds }
6491da177e4SLinus Torvalds 
6501da177e4SLinus Torvalds /**
6511da177e4SLinus Torvalds  * show_scaling_governor - show the current policy for the specified CPU
6521da177e4SLinus Torvalds  */
653905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
6541da177e4SLinus Torvalds {
6551da177e4SLinus Torvalds 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
6561da177e4SLinus Torvalds 		return sprintf(buf, "powersave\n");
6571da177e4SLinus Torvalds 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
6581da177e4SLinus Torvalds 		return sprintf(buf, "performance\n");
6591da177e4SLinus Torvalds 	else if (policy->governor)
6604b972f0bSviresh kumar 		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
66129464f28SDave Jones 				policy->governor->name);
6621da177e4SLinus Torvalds 	return -EINVAL;
6631da177e4SLinus Torvalds }
6641da177e4SLinus Torvalds 
6651da177e4SLinus Torvalds /**
6661da177e4SLinus Torvalds  * store_scaling_governor - store policy for the specified CPU
6671da177e4SLinus Torvalds  */
6681da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
6691da177e4SLinus Torvalds 					const char *buf, size_t count)
6701da177e4SLinus Torvalds {
6715136fa56SSrivatsa S. Bhat 	int ret;
6721da177e4SLinus Torvalds 	char	str_governor[16];
6731da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;
6741da177e4SLinus Torvalds 
6751da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
6761da177e4SLinus Torvalds 	if (ret)
6771da177e4SLinus Torvalds 		return ret;
6781da177e4SLinus Torvalds 
6791da177e4SLinus Torvalds 	ret = sscanf(buf, "%15s", str_governor);
6801da177e4SLinus Torvalds 	if (ret != 1)
6811da177e4SLinus Torvalds 		return -EINVAL;
6821da177e4SLinus Torvalds 
683e08f5f5bSGautham R Shenoy 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
684e08f5f5bSGautham R Shenoy 						&new_policy.governor))
6851da177e4SLinus Torvalds 		return -EINVAL;
6861da177e4SLinus Torvalds 
687037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
6887970e08bSThomas Renninger 
6897970e08bSThomas Renninger 	policy->user_policy.policy = policy->policy;
6907970e08bSThomas Renninger 	policy->user_policy.governor = policy->governor;
6917970e08bSThomas Renninger 
692e08f5f5bSGautham R Shenoy 	if (ret)
693e08f5f5bSGautham R Shenoy 		return ret;
694e08f5f5bSGautham R Shenoy 	else
695e08f5f5bSGautham R Shenoy 		return count;
6961da177e4SLinus Torvalds }
6971da177e4SLinus Torvalds 
6981da177e4SLinus Torvalds /**
6991da177e4SLinus Torvalds  * show_scaling_driver - show the cpufreq driver currently loaded
7001da177e4SLinus Torvalds  */
7011da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
7021da177e4SLinus Torvalds {
7031c3d85ddSRafael J. Wysocki 	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
7041da177e4SLinus Torvalds }
7051da177e4SLinus Torvalds 
7061da177e4SLinus Torvalds /**
7071da177e4SLinus Torvalds  * show_scaling_available_governors - show the available CPUfreq governors
7081da177e4SLinus Torvalds  */
7091da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
7101da177e4SLinus Torvalds 						char *buf)
7111da177e4SLinus Torvalds {
7121da177e4SLinus Torvalds 	ssize_t i = 0;
7131da177e4SLinus Torvalds 	struct cpufreq_governor *t;
7141da177e4SLinus Torvalds 
7159c0ebcf7SViresh Kumar 	if (!has_target()) {
7161da177e4SLinus Torvalds 		i += sprintf(buf, "performance powersave");
7171da177e4SLinus Torvalds 		goto out;
7181da177e4SLinus Torvalds 	}
7191da177e4SLinus Torvalds 
720f7b27061SViresh Kumar 	for_each_governor(t) {
72129464f28SDave Jones 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
72229464f28SDave Jones 		    - (CPUFREQ_NAME_LEN + 2)))
7231da177e4SLinus Torvalds 			goto out;
7244b972f0bSviresh kumar 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
7251da177e4SLinus Torvalds 	}
7261da177e4SLinus Torvalds out:
7271da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7281da177e4SLinus Torvalds 	return i;
7291da177e4SLinus Torvalds }
730e8628dd0SDarrick J. Wong 
731f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
7321da177e4SLinus Torvalds {
7331da177e4SLinus Torvalds 	ssize_t i = 0;
7341da177e4SLinus Torvalds 	unsigned int cpu;
7351da177e4SLinus Torvalds 
736835481d9SRusty Russell 	for_each_cpu(cpu, mask) {
7371da177e4SLinus Torvalds 		if (i)
7381da177e4SLinus Torvalds 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
7391da177e4SLinus Torvalds 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
7401da177e4SLinus Torvalds 		if (i >= (PAGE_SIZE - 5))
7411da177e4SLinus Torvalds 			break;
7421da177e4SLinus Torvalds 	}
7431da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7441da177e4SLinus Torvalds 	return i;
7451da177e4SLinus Torvalds }
746f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
7471da177e4SLinus Torvalds 
748e8628dd0SDarrick J. Wong /**
749e8628dd0SDarrick J. Wong  * show_related_cpus - show the CPUs affected by each transition even if
750e8628dd0SDarrick J. Wong  * hw coordination is in use
751e8628dd0SDarrick J. Wong  */
752e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
753e8628dd0SDarrick J. Wong {
754f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->related_cpus, buf);
755e8628dd0SDarrick J. Wong }
756e8628dd0SDarrick J. Wong 
757e8628dd0SDarrick J. Wong /**
758e8628dd0SDarrick J. Wong  * show_affected_cpus - show the CPUs affected by each transition
759e8628dd0SDarrick J. Wong  */
760e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
761e8628dd0SDarrick J. Wong {
762f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->cpus, buf);
763e8628dd0SDarrick J. Wong }
764e8628dd0SDarrick J. Wong 
7659e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
7669e76988eSVenki Pallipadi 					const char *buf, size_t count)
7679e76988eSVenki Pallipadi {
7689e76988eSVenki Pallipadi 	unsigned int freq = 0;
7699e76988eSVenki Pallipadi 	unsigned int ret;
7709e76988eSVenki Pallipadi 
771879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->store_setspeed)
7729e76988eSVenki Pallipadi 		return -EINVAL;
7739e76988eSVenki Pallipadi 
7749e76988eSVenki Pallipadi 	ret = sscanf(buf, "%u", &freq);
7759e76988eSVenki Pallipadi 	if (ret != 1)
7769e76988eSVenki Pallipadi 		return -EINVAL;
7779e76988eSVenki Pallipadi 
7789e76988eSVenki Pallipadi 	policy->governor->store_setspeed(policy, freq);
7799e76988eSVenki Pallipadi 
7809e76988eSVenki Pallipadi 	return count;
7819e76988eSVenki Pallipadi }
7829e76988eSVenki Pallipadi 
7839e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
7849e76988eSVenki Pallipadi {
785879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->show_setspeed)
7869e76988eSVenki Pallipadi 		return sprintf(buf, "<unsupported>\n");
7879e76988eSVenki Pallipadi 
7889e76988eSVenki Pallipadi 	return policy->governor->show_setspeed(policy, buf);
7899e76988eSVenki Pallipadi }
7901da177e4SLinus Torvalds 
791e2f74f35SThomas Renninger /**
7928bf1ac72Sviresh kumar  * show_bios_limit - show the current cpufreq HW/BIOS limitation
793e2f74f35SThomas Renninger  */
794e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
795e2f74f35SThomas Renninger {
796e2f74f35SThomas Renninger 	unsigned int limit;
797e2f74f35SThomas Renninger 	int ret;
7981c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
7991c3d85ddSRafael J. Wysocki 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
800e2f74f35SThomas Renninger 		if (!ret)
801e2f74f35SThomas Renninger 			return sprintf(buf, "%u\n", limit);
802e2f74f35SThomas Renninger 	}
803e2f74f35SThomas Renninger 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
804e2f74f35SThomas Renninger }
805e2f74f35SThomas Renninger 
8066dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
8076dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq);
8086dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq);
8096dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency);
8106dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors);
8116dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver);
8126dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq);
8136dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit);
8146dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus);
8156dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus);
8166dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq);
8176dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq);
8186dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor);
8196dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed);
8201da177e4SLinus Torvalds 
8211da177e4SLinus Torvalds static struct attribute *default_attrs[] = {
8221da177e4SLinus Torvalds 	&cpuinfo_min_freq.attr,
8231da177e4SLinus Torvalds 	&cpuinfo_max_freq.attr,
824ed129784SThomas Renninger 	&cpuinfo_transition_latency.attr,
8251da177e4SLinus Torvalds 	&scaling_min_freq.attr,
8261da177e4SLinus Torvalds 	&scaling_max_freq.attr,
8271da177e4SLinus Torvalds 	&affected_cpus.attr,
828e8628dd0SDarrick J. Wong 	&related_cpus.attr,
8291da177e4SLinus Torvalds 	&scaling_governor.attr,
8301da177e4SLinus Torvalds 	&scaling_driver.attr,
8311da177e4SLinus Torvalds 	&scaling_available_governors.attr,
8329e76988eSVenki Pallipadi 	&scaling_setspeed.attr,
8331da177e4SLinus Torvalds 	NULL
8341da177e4SLinus Torvalds };
8351da177e4SLinus Torvalds 
8361da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
8371da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr)
8381da177e4SLinus Torvalds 
8391da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
8401da177e4SLinus Torvalds {
8411da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8421da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
8431b750e3bSViresh Kumar 	ssize_t ret;
8446eed9404SViresh Kumar 
8456eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
8461b750e3bSViresh Kumar 		return -EINVAL;
8475a01f2e8SVenkatesh Pallipadi 
848ad7722daSviresh kumar 	down_read(&policy->rwsem);
8495a01f2e8SVenkatesh Pallipadi 
850e08f5f5bSGautham R Shenoy 	if (fattr->show)
851e08f5f5bSGautham R Shenoy 		ret = fattr->show(policy, buf);
852e08f5f5bSGautham R Shenoy 	else
853e08f5f5bSGautham R Shenoy 		ret = -EIO;
854e08f5f5bSGautham R Shenoy 
855ad7722daSviresh kumar 	up_read(&policy->rwsem);
8566eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
8571b750e3bSViresh Kumar 
8581da177e4SLinus Torvalds 	return ret;
8591da177e4SLinus Torvalds }
8601da177e4SLinus Torvalds 
8611da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr,
8621da177e4SLinus Torvalds 		     const char *buf, size_t count)
8631da177e4SLinus Torvalds {
8641da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8651da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
866a07530b4SDave Jones 	ssize_t ret = -EINVAL;
8676eed9404SViresh Kumar 
8684f750c93SSrivatsa S. Bhat 	get_online_cpus();
8694f750c93SSrivatsa S. Bhat 
8704f750c93SSrivatsa S. Bhat 	if (!cpu_online(policy->cpu))
8714f750c93SSrivatsa S. Bhat 		goto unlock;
8724f750c93SSrivatsa S. Bhat 
8736eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
8744f750c93SSrivatsa S. Bhat 		goto unlock;
8755a01f2e8SVenkatesh Pallipadi 
876ad7722daSviresh kumar 	down_write(&policy->rwsem);
8775a01f2e8SVenkatesh Pallipadi 
87811e584cfSViresh Kumar 	/* Updating inactive policies is invalid, so avoid doing that. */
87911e584cfSViresh Kumar 	if (unlikely(policy_is_inactive(policy))) {
88011e584cfSViresh Kumar 		ret = -EBUSY;
88111e584cfSViresh Kumar 		goto unlock_policy_rwsem;
88211e584cfSViresh Kumar 	}
88311e584cfSViresh Kumar 
884e08f5f5bSGautham R Shenoy 	if (fattr->store)
885e08f5f5bSGautham R Shenoy 		ret = fattr->store(policy, buf, count);
886e08f5f5bSGautham R Shenoy 	else
887e08f5f5bSGautham R Shenoy 		ret = -EIO;
888e08f5f5bSGautham R Shenoy 
88911e584cfSViresh Kumar unlock_policy_rwsem:
890ad7722daSviresh kumar 	up_write(&policy->rwsem);
8916eed9404SViresh Kumar 
8926eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
8934f750c93SSrivatsa S. Bhat unlock:
8944f750c93SSrivatsa S. Bhat 	put_online_cpus();
8954f750c93SSrivatsa S. Bhat 
8961da177e4SLinus Torvalds 	return ret;
8971da177e4SLinus Torvalds }
8981da177e4SLinus Torvalds 
8991da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj)
9001da177e4SLinus Torvalds {
9011da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
9022d06d8c4SDominik Brodowski 	pr_debug("last reference is dropped\n");
9031da177e4SLinus Torvalds 	complete(&policy->kobj_unregister);
9041da177e4SLinus Torvalds }
9051da177e4SLinus Torvalds 
90652cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = {
9071da177e4SLinus Torvalds 	.show	= show,
9081da177e4SLinus Torvalds 	.store	= store,
9091da177e4SLinus Torvalds };
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = {
9121da177e4SLinus Torvalds 	.sysfs_ops	= &sysfs_ops,
9131da177e4SLinus Torvalds 	.default_attrs	= default_attrs,
9141da177e4SLinus Torvalds 	.release	= cpufreq_sysfs_release,
9151da177e4SLinus Torvalds };
9161da177e4SLinus Torvalds 
9172361be23SViresh Kumar struct kobject *cpufreq_global_kobject;
9182361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject);
9192361be23SViresh Kumar 
9202361be23SViresh Kumar static int cpufreq_global_kobject_usage;
9212361be23SViresh Kumar 
9222361be23SViresh Kumar int cpufreq_get_global_kobject(void)
9232361be23SViresh Kumar {
9242361be23SViresh Kumar 	if (!cpufreq_global_kobject_usage++)
9252361be23SViresh Kumar 		return kobject_add(cpufreq_global_kobject,
9262361be23SViresh Kumar 				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
9272361be23SViresh Kumar 
9282361be23SViresh Kumar 	return 0;
9292361be23SViresh Kumar }
9302361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject);
9312361be23SViresh Kumar 
9322361be23SViresh Kumar void cpufreq_put_global_kobject(void)
9332361be23SViresh Kumar {
9342361be23SViresh Kumar 	if (!--cpufreq_global_kobject_usage)
9352361be23SViresh Kumar 		kobject_del(cpufreq_global_kobject);
9362361be23SViresh Kumar }
9372361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject);
9382361be23SViresh Kumar 
9392361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr)
9402361be23SViresh Kumar {
9412361be23SViresh Kumar 	int ret = cpufreq_get_global_kobject();
9422361be23SViresh Kumar 
9432361be23SViresh Kumar 	if (!ret) {
9442361be23SViresh Kumar 		ret = sysfs_create_file(cpufreq_global_kobject, attr);
9452361be23SViresh Kumar 		if (ret)
9462361be23SViresh Kumar 			cpufreq_put_global_kobject();
9472361be23SViresh Kumar 	}
9482361be23SViresh Kumar 
9492361be23SViresh Kumar 	return ret;
9502361be23SViresh Kumar }
9512361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file);
9522361be23SViresh Kumar 
9532361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr)
9542361be23SViresh Kumar {
9552361be23SViresh Kumar 	sysfs_remove_file(cpufreq_global_kobject, attr);
9562361be23SViresh Kumar 	cpufreq_put_global_kobject();
9572361be23SViresh Kumar }
9582361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
9592361be23SViresh Kumar 
96087549141SViresh Kumar static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
96187549141SViresh Kumar {
96287549141SViresh Kumar 	struct device *cpu_dev;
96387549141SViresh Kumar 
96487549141SViresh Kumar 	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
96587549141SViresh Kumar 
96687549141SViresh Kumar 	if (!policy)
96787549141SViresh Kumar 		return 0;
96887549141SViresh Kumar 
96987549141SViresh Kumar 	cpu_dev = get_cpu_device(cpu);
97087549141SViresh Kumar 	if (WARN_ON(!cpu_dev))
97187549141SViresh Kumar 		return 0;
97287549141SViresh Kumar 
97387549141SViresh Kumar 	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
97487549141SViresh Kumar }
97587549141SViresh Kumar 
97687549141SViresh Kumar static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
97787549141SViresh Kumar {
97887549141SViresh Kumar 	struct device *cpu_dev;
97987549141SViresh Kumar 
98087549141SViresh Kumar 	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
98187549141SViresh Kumar 
98287549141SViresh Kumar 	cpu_dev = get_cpu_device(cpu);
98387549141SViresh Kumar 	if (WARN_ON(!cpu_dev))
98487549141SViresh Kumar 		return;
98587549141SViresh Kumar 
98687549141SViresh Kumar 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
98787549141SViresh Kumar }
98887549141SViresh Kumar 
98987549141SViresh Kumar /* Add/remove symlinks for all related CPUs */
990308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
99119d6f7ecSDave Jones {
99219d6f7ecSDave Jones 	unsigned int j;
99319d6f7ecSDave Jones 	int ret = 0;
99419d6f7ecSDave Jones 
99587549141SViresh Kumar 	/* Some related CPUs might not be present (physically hotplugged) */
99687549141SViresh Kumar 	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
9979d16f207SSaravana Kannan 		if (j == policy->kobj_cpu)
99819d6f7ecSDave Jones 			continue;
99919d6f7ecSDave Jones 
100087549141SViresh Kumar 		ret = add_cpu_dev_symlink(policy, j);
100171c3461eSRafael J. Wysocki 		if (ret)
100271c3461eSRafael J. Wysocki 			break;
100319d6f7ecSDave Jones 	}
100487549141SViresh Kumar 
100519d6f7ecSDave Jones 	return ret;
100619d6f7ecSDave Jones }
100719d6f7ecSDave Jones 
100887549141SViresh Kumar static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
100987549141SViresh Kumar {
101087549141SViresh Kumar 	unsigned int j;
101187549141SViresh Kumar 
101287549141SViresh Kumar 	/* Some related CPUs might not be present (physically hotplugged) */
101387549141SViresh Kumar 	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
101487549141SViresh Kumar 		if (j == policy->kobj_cpu)
101587549141SViresh Kumar 			continue;
101687549141SViresh Kumar 
101787549141SViresh Kumar 		remove_cpu_dev_symlink(policy, j);
101887549141SViresh Kumar 	}
101987549141SViresh Kumar }
102087549141SViresh Kumar 
1021308b60e7SViresh Kumar static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
10228a25a2fdSKay Sievers 				     struct device *dev)
1023909a694eSDave Jones {
1024909a694eSDave Jones 	struct freq_attr **drv_attr;
1025909a694eSDave Jones 	int ret = 0;
1026909a694eSDave Jones 
1027909a694eSDave Jones 	/* set up files for this cpu device */
10281c3d85ddSRafael J. Wysocki 	drv_attr = cpufreq_driver->attr;
1029f13f1184SViresh Kumar 	while (drv_attr && *drv_attr) {
1030909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1031909a694eSDave Jones 		if (ret)
10326d4e81edSTomeu Vizoso 			return ret;
1033909a694eSDave Jones 		drv_attr++;
1034909a694eSDave Jones 	}
10351c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->get) {
1036909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1037909a694eSDave Jones 		if (ret)
10386d4e81edSTomeu Vizoso 			return ret;
1039909a694eSDave Jones 	}
1040c034b02eSDirk Brandewie 
1041909a694eSDave Jones 	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1042909a694eSDave Jones 	if (ret)
10436d4e81edSTomeu Vizoso 		return ret;
1044c034b02eSDirk Brandewie 
10451c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
1046e2f74f35SThomas Renninger 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1047e2f74f35SThomas Renninger 		if (ret)
10486d4e81edSTomeu Vizoso 			return ret;
1049e2f74f35SThomas Renninger 	}
1050909a694eSDave Jones 
10516d4e81edSTomeu Vizoso 	return cpufreq_add_dev_symlink(policy);
1052e18f1682SSrivatsa S. Bhat }
1053e18f1682SSrivatsa S. Bhat 
1054e18f1682SSrivatsa S. Bhat static void cpufreq_init_policy(struct cpufreq_policy *policy)
1055e18f1682SSrivatsa S. Bhat {
10566e2c89d1Sviresh kumar 	struct cpufreq_governor *gov = NULL;
1057e18f1682SSrivatsa S. Bhat 	struct cpufreq_policy new_policy;
1058e18f1682SSrivatsa S. Bhat 	int ret = 0;
1059e18f1682SSrivatsa S. Bhat 
1060d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
1061a27a9ab7SJason Baron 
10626e2c89d1Sviresh kumar 	/* Update governor of new_policy to the governor used before hotplug */
10634573237bSViresh Kumar 	gov = find_governor(policy->last_governor);
10646e2c89d1Sviresh kumar 	if (gov)
10656e2c89d1Sviresh kumar 		pr_debug("Restoring governor %s for cpu %d\n",
10666e2c89d1Sviresh kumar 				policy->governor->name, policy->cpu);
10676e2c89d1Sviresh kumar 	else
10686e2c89d1Sviresh kumar 		gov = CPUFREQ_DEFAULT_GOVERNOR;
10696e2c89d1Sviresh kumar 
10706e2c89d1Sviresh kumar 	new_policy.governor = gov;
10716e2c89d1Sviresh kumar 
1072a27a9ab7SJason Baron 	/* Use the default policy if its valid. */
1073a27a9ab7SJason Baron 	if (cpufreq_driver->setpolicy)
10746e2c89d1Sviresh kumar 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1075ecf7e461SDave Jones 
1076ecf7e461SDave Jones 	/* set default policy */
1077037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
1078ecf7e461SDave Jones 	if (ret) {
10792d06d8c4SDominik Brodowski 		pr_debug("setting policy failed\n");
10801c3d85ddSRafael J. Wysocki 		if (cpufreq_driver->exit)
10811c3d85ddSRafael J. Wysocki 			cpufreq_driver->exit(policy);
1082ecf7e461SDave Jones 	}
1083909a694eSDave Jones }
1084909a694eSDave Jones 
1085d8d3b471SViresh Kumar static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
108642f921a6SViresh Kumar 				  unsigned int cpu, struct device *dev)
1087fcf80582SViresh Kumar {
10889c0ebcf7SViresh Kumar 	int ret = 0;
1089fcf80582SViresh Kumar 
1090bb29ae15SViresh Kumar 	/* Has this CPU been taken care of already? */
1091bb29ae15SViresh Kumar 	if (cpumask_test_cpu(cpu, policy->cpus))
1092bb29ae15SViresh Kumar 		return 0;
1093bb29ae15SViresh Kumar 
10949c0ebcf7SViresh Kumar 	if (has_target()) {
10953de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
10963de9bdebSViresh Kumar 		if (ret) {
10973de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
10983de9bdebSViresh Kumar 			return ret;
10993de9bdebSViresh Kumar 		}
11003de9bdebSViresh Kumar 	}
1101fcf80582SViresh Kumar 
1102ad7722daSviresh kumar 	down_write(&policy->rwsem);
1103fcf80582SViresh Kumar 	cpumask_set_cpu(cpu, policy->cpus);
1104ad7722daSviresh kumar 	up_write(&policy->rwsem);
11052eaa3e2dSViresh Kumar 
11069c0ebcf7SViresh Kumar 	if (has_target()) {
1107e5c87b76SStratos Karafotis 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1108e5c87b76SStratos Karafotis 		if (!ret)
1109e5c87b76SStratos Karafotis 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1110e5c87b76SStratos Karafotis 
1111e5c87b76SStratos Karafotis 		if (ret) {
11123de9bdebSViresh Kumar 			pr_err("%s: Failed to start governor\n", __func__);
11133de9bdebSViresh Kumar 			return ret;
11143de9bdebSViresh Kumar 		}
1115820c6ca2SViresh Kumar 	}
1116fcf80582SViresh Kumar 
111787549141SViresh Kumar 	return 0;
1118fcf80582SViresh Kumar }
11191da177e4SLinus Torvalds 
11208414809cSSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
11218414809cSSrivatsa S. Bhat {
11228414809cSSrivatsa S. Bhat 	struct cpufreq_policy *policy;
11238414809cSSrivatsa S. Bhat 	unsigned long flags;
11248414809cSSrivatsa S. Bhat 
112544871c9cSLan Tianyu 	read_lock_irqsave(&cpufreq_driver_lock, flags);
11263914d379SViresh Kumar 	policy = per_cpu(cpufreq_cpu_data, cpu);
112744871c9cSLan Tianyu 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
11288414809cSSrivatsa S. Bhat 
11293914d379SViresh Kumar 	if (likely(policy)) {
11303914d379SViresh Kumar 		/* Policy should be inactive here */
11313914d379SViresh Kumar 		WARN_ON(!policy_is_inactive(policy));
11323914d379SViresh Kumar 	}
11336e2c89d1Sviresh kumar 
11348414809cSSrivatsa S. Bhat 	return policy;
11358414809cSSrivatsa S. Bhat }
11368414809cSSrivatsa S. Bhat 
11372fc3384dSViresh Kumar static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1138e9698cc5SSrivatsa S. Bhat {
1139e9698cc5SSrivatsa S. Bhat 	struct cpufreq_policy *policy;
11402fc3384dSViresh Kumar 	int ret;
1141e9698cc5SSrivatsa S. Bhat 
1142e9698cc5SSrivatsa S. Bhat 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1143e9698cc5SSrivatsa S. Bhat 	if (!policy)
1144e9698cc5SSrivatsa S. Bhat 		return NULL;
1145e9698cc5SSrivatsa S. Bhat 
1146e9698cc5SSrivatsa S. Bhat 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1147e9698cc5SSrivatsa S. Bhat 		goto err_free_policy;
1148e9698cc5SSrivatsa S. Bhat 
1149e9698cc5SSrivatsa S. Bhat 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1150e9698cc5SSrivatsa S. Bhat 		goto err_free_cpumask;
1151e9698cc5SSrivatsa S. Bhat 
11522fc3384dSViresh Kumar 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
11532fc3384dSViresh Kumar 				   "cpufreq");
11542fc3384dSViresh Kumar 	if (ret) {
11552fc3384dSViresh Kumar 		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
11562fc3384dSViresh Kumar 		goto err_free_rcpumask;
11572fc3384dSViresh Kumar 	}
11582fc3384dSViresh Kumar 
1159c88a1f8bSLukasz Majewski 	INIT_LIST_HEAD(&policy->policy_list);
1160ad7722daSviresh kumar 	init_rwsem(&policy->rwsem);
116112478cf0SSrivatsa S. Bhat 	spin_lock_init(&policy->transition_lock);
116212478cf0SSrivatsa S. Bhat 	init_waitqueue_head(&policy->transition_wait);
1163818c5712SViresh Kumar 	init_completion(&policy->kobj_unregister);
1164818c5712SViresh Kumar 	INIT_WORK(&policy->update, handle_update);
1165ad7722daSviresh kumar 
11662fc3384dSViresh Kumar 	policy->cpu = dev->id;
116787549141SViresh Kumar 
116887549141SViresh Kumar 	/* Set this once on allocation */
11692fc3384dSViresh Kumar 	policy->kobj_cpu = dev->id;
117087549141SViresh Kumar 
1171e9698cc5SSrivatsa S. Bhat 	return policy;
1172e9698cc5SSrivatsa S. Bhat 
11732fc3384dSViresh Kumar err_free_rcpumask:
11742fc3384dSViresh Kumar 	free_cpumask_var(policy->related_cpus);
1175e9698cc5SSrivatsa S. Bhat err_free_cpumask:
1176e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1177e9698cc5SSrivatsa S. Bhat err_free_policy:
1178e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1179e9698cc5SSrivatsa S. Bhat 
1180e9698cc5SSrivatsa S. Bhat 	return NULL;
1181e9698cc5SSrivatsa S. Bhat }
1182e9698cc5SSrivatsa S. Bhat 
11832fc3384dSViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
118442f921a6SViresh Kumar {
118542f921a6SViresh Kumar 	struct kobject *kobj;
118642f921a6SViresh Kumar 	struct completion *cmp;
118742f921a6SViresh Kumar 
11882fc3384dSViresh Kumar 	if (notify)
1189fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1190fcd7af91SViresh Kumar 					     CPUFREQ_REMOVE_POLICY, policy);
1191fcd7af91SViresh Kumar 
119287549141SViresh Kumar 	down_write(&policy->rwsem);
119387549141SViresh Kumar 	cpufreq_remove_dev_symlink(policy);
119442f921a6SViresh Kumar 	kobj = &policy->kobj;
119542f921a6SViresh Kumar 	cmp = &policy->kobj_unregister;
119687549141SViresh Kumar 	up_write(&policy->rwsem);
119742f921a6SViresh Kumar 	kobject_put(kobj);
119842f921a6SViresh Kumar 
119942f921a6SViresh Kumar 	/*
120042f921a6SViresh Kumar 	 * We need to make sure that the underlying kobj is
120142f921a6SViresh Kumar 	 * actually not referenced anymore by anybody before we
120242f921a6SViresh Kumar 	 * proceed with unloading.
120342f921a6SViresh Kumar 	 */
120442f921a6SViresh Kumar 	pr_debug("waiting for dropping of refcount\n");
120542f921a6SViresh Kumar 	wait_for_completion(cmp);
120642f921a6SViresh Kumar 	pr_debug("wait complete\n");
120742f921a6SViresh Kumar }
120842f921a6SViresh Kumar 
12093654c5ccSViresh Kumar static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1210e9698cc5SSrivatsa S. Bhat {
1211988bed09SViresh Kumar 	unsigned long flags;
1212988bed09SViresh Kumar 	int cpu;
1213988bed09SViresh Kumar 
1214988bed09SViresh Kumar 	/* Remove policy from list */
1215988bed09SViresh Kumar 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1216988bed09SViresh Kumar 	list_del(&policy->policy_list);
1217988bed09SViresh Kumar 
1218988bed09SViresh Kumar 	for_each_cpu(cpu, policy->related_cpus)
1219988bed09SViresh Kumar 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
1220988bed09SViresh Kumar 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1221988bed09SViresh Kumar 
12223654c5ccSViresh Kumar 	cpufreq_policy_put_kobj(policy, notify);
1223e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->related_cpus);
1224e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1225e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1226e9698cc5SSrivatsa S. Bhat }
1227e9698cc5SSrivatsa S. Bhat 
122887549141SViresh Kumar static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
12290d66b91eSSrivatsa S. Bhat {
123099ec899eSSrivatsa S. Bhat 	if (WARN_ON(cpu == policy->cpu))
123187549141SViresh Kumar 		return;
1232cb38ed5cSSrivatsa S. Bhat 
1233ad7722daSviresh kumar 	down_write(&policy->rwsem);
12340d66b91eSSrivatsa S. Bhat 	policy->cpu = cpu;
1235ad7722daSviresh kumar 	up_write(&policy->rwsem);
12360d66b91eSSrivatsa S. Bhat }
12370d66b91eSSrivatsa S. Bhat 
123823faf0b7SViresh Kumar /**
123923faf0b7SViresh Kumar  * cpufreq_add_dev - add a CPU device
124023faf0b7SViresh Kumar  *
124123faf0b7SViresh Kumar  * Adds the cpufreq interface for a CPU device.
124223faf0b7SViresh Kumar  *
124323faf0b7SViresh Kumar  * The Oracle says: try running cpufreq registration/unregistration concurrently
124423faf0b7SViresh Kumar  * with with cpu hotplugging and all hell will break loose. Tried to clean this
124523faf0b7SViresh Kumar  * mess up, but more thorough testing is needed. - Mathieu
124623faf0b7SViresh Kumar  */
124723faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
12481da177e4SLinus Torvalds {
1249fcf80582SViresh Kumar 	unsigned int j, cpu = dev->id;
125065922465SViresh Kumar 	int ret = -ENOMEM;
12517f0c020aSViresh Kumar 	struct cpufreq_policy *policy;
12521da177e4SLinus Torvalds 	unsigned long flags;
125387549141SViresh Kumar 	bool recover_policy = !sif;
1254c32b6b8eSAshok Raj 
12552d06d8c4SDominik Brodowski 	pr_debug("adding CPU %u\n", cpu);
12561da177e4SLinus Torvalds 
125787549141SViresh Kumar 	/*
125887549141SViresh Kumar 	 * Only possible if 'cpu' wasn't physically present earlier and we are
125987549141SViresh Kumar 	 * here from subsys_interface add callback. A hotplug notifier will
126087549141SViresh Kumar 	 * follow and we will handle it like logical CPU hotplug then. For now,
126187549141SViresh Kumar 	 * just create the sysfs link.
126287549141SViresh Kumar 	 */
126387549141SViresh Kumar 	if (cpu_is_offline(cpu))
126487549141SViresh Kumar 		return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
126587549141SViresh Kumar 
12666eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
12676eed9404SViresh Kumar 		return 0;
12686eed9404SViresh Kumar 
1269bb29ae15SViresh Kumar 	/* Check if this CPU already has a policy to manage it */
12709104bb26SViresh Kumar 	policy = per_cpu(cpufreq_cpu_data, cpu);
12719104bb26SViresh Kumar 	if (policy && !policy_is_inactive(policy)) {
12729104bb26SViresh Kumar 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
12737f0c020aSViresh Kumar 		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
12746eed9404SViresh Kumar 		up_read(&cpufreq_rwsem);
12756eed9404SViresh Kumar 		return ret;
1276fcf80582SViresh Kumar 	}
12771da177e4SLinus Torvalds 
127872368d12SRafael J. Wysocki 	/*
127972368d12SRafael J. Wysocki 	 * Restore the saved policy when doing light-weight init and fall back
128072368d12SRafael J. Wysocki 	 * to the full init if that fails.
128172368d12SRafael J. Wysocki 	 */
128296bbbe4aSViresh Kumar 	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
128372368d12SRafael J. Wysocki 	if (!policy) {
128496bbbe4aSViresh Kumar 		recover_policy = false;
12852fc3384dSViresh Kumar 		policy = cpufreq_policy_alloc(dev);
1286059019a3SDave Jones 		if (!policy)
12871da177e4SLinus Torvalds 			goto nomem_out;
128872368d12SRafael J. Wysocki 	}
12890d66b91eSSrivatsa S. Bhat 
12900d66b91eSSrivatsa S. Bhat 	/*
12910d66b91eSSrivatsa S. Bhat 	 * In the resume path, since we restore a saved policy, the assignment
12920d66b91eSSrivatsa S. Bhat 	 * to policy->cpu is like an update of the existing policy, rather than
12930d66b91eSSrivatsa S. Bhat 	 * the creation of a brand new one. So we need to perform this update
12940d66b91eSSrivatsa S. Bhat 	 * by invoking update_policy_cpu().
12950d66b91eSSrivatsa S. Bhat 	 */
129687549141SViresh Kumar 	if (recover_policy && cpu != policy->cpu)
129787549141SViresh Kumar 		update_policy_cpu(policy, cpu);
12980d66b91eSSrivatsa S. Bhat 
1299835481d9SRusty Russell 	cpumask_copy(policy->cpus, cpumask_of(cpu));
13001da177e4SLinus Torvalds 
13011da177e4SLinus Torvalds 	/* call driver. From then on the cpufreq must be able
13021da177e4SLinus Torvalds 	 * to accept all calls to ->verify and ->setpolicy for this CPU
13031da177e4SLinus Torvalds 	 */
13041c3d85ddSRafael J. Wysocki 	ret = cpufreq_driver->init(policy);
13051da177e4SLinus Torvalds 	if (ret) {
13062d06d8c4SDominik Brodowski 		pr_debug("initialization failed\n");
13072eaa3e2dSViresh Kumar 		goto err_set_policy_cpu;
13081da177e4SLinus Torvalds 	}
1309643ae6e8SViresh Kumar 
13106d4e81edSTomeu Vizoso 	down_write(&policy->rwsem);
13116d4e81edSTomeu Vizoso 
13125a7e56a5SViresh Kumar 	/* related cpus should atleast have policy->cpus */
13135a7e56a5SViresh Kumar 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
13145a7e56a5SViresh Kumar 
13155a7e56a5SViresh Kumar 	/*
13165a7e56a5SViresh Kumar 	 * affected cpus must always be the one, which are online. We aren't
13175a7e56a5SViresh Kumar 	 * managing offline cpus here.
13185a7e56a5SViresh Kumar 	 */
13195a7e56a5SViresh Kumar 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
13205a7e56a5SViresh Kumar 
132196bbbe4aSViresh Kumar 	if (!recover_policy) {
13225a7e56a5SViresh Kumar 		policy->user_policy.min = policy->min;
13235a7e56a5SViresh Kumar 		policy->user_policy.max = policy->max;
13246d4e81edSTomeu Vizoso 
1325652ed95dSViresh Kumar 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1326988bed09SViresh Kumar 		for_each_cpu(j, policy->related_cpus)
1327652ed95dSViresh Kumar 			per_cpu(cpufreq_cpu_data, j) = policy;
1328652ed95dSViresh Kumar 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1329988bed09SViresh Kumar 	}
1330652ed95dSViresh Kumar 
13312ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1332da60ce9fSViresh Kumar 		policy->cur = cpufreq_driver->get(policy->cpu);
1333da60ce9fSViresh Kumar 		if (!policy->cur) {
1334da60ce9fSViresh Kumar 			pr_err("%s: ->get() failed\n", __func__);
1335da60ce9fSViresh Kumar 			goto err_get_freq;
1336da60ce9fSViresh Kumar 		}
1337da60ce9fSViresh Kumar 	}
1338da60ce9fSViresh Kumar 
1339d3916691SViresh Kumar 	/*
1340d3916691SViresh Kumar 	 * Sometimes boot loaders set CPU frequency to a value outside of
1341d3916691SViresh Kumar 	 * frequency table present with cpufreq core. In such cases CPU might be
1342d3916691SViresh Kumar 	 * unstable if it has to run on that frequency for long duration of time
1343d3916691SViresh Kumar 	 * and so its better to set it to a frequency which is specified in
1344d3916691SViresh Kumar 	 * freq-table. This also makes cpufreq stats inconsistent as
1345d3916691SViresh Kumar 	 * cpufreq-stats would fail to register because current frequency of CPU
1346d3916691SViresh Kumar 	 * isn't found in freq-table.
1347d3916691SViresh Kumar 	 *
1348d3916691SViresh Kumar 	 * Because we don't want this change to effect boot process badly, we go
1349d3916691SViresh Kumar 	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1350d3916691SViresh Kumar 	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1351d3916691SViresh Kumar 	 * is initialized to zero).
1352d3916691SViresh Kumar 	 *
1353d3916691SViresh Kumar 	 * We are passing target-freq as "policy->cur - 1" otherwise
1354d3916691SViresh Kumar 	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1355d3916691SViresh Kumar 	 * equal to target-freq.
1356d3916691SViresh Kumar 	 */
1357d3916691SViresh Kumar 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1358d3916691SViresh Kumar 	    && has_target()) {
1359d3916691SViresh Kumar 		/* Are we running at unknown frequency ? */
1360d3916691SViresh Kumar 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1361d3916691SViresh Kumar 		if (ret == -EINVAL) {
1362d3916691SViresh Kumar 			/* Warn user and fix it */
1363d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1364d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1365d3916691SViresh Kumar 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1366d3916691SViresh Kumar 				CPUFREQ_RELATION_L);
1367d3916691SViresh Kumar 
1368d3916691SViresh Kumar 			/*
1369d3916691SViresh Kumar 			 * Reaching here after boot in a few seconds may not
1370d3916691SViresh Kumar 			 * mean that system will remain stable at "unknown"
1371d3916691SViresh Kumar 			 * frequency for longer duration. Hence, a BUG_ON().
1372d3916691SViresh Kumar 			 */
1373d3916691SViresh Kumar 			BUG_ON(ret);
1374d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1375d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1376d3916691SViresh Kumar 		}
1377d3916691SViresh Kumar 	}
1378d3916691SViresh Kumar 
1379a1531acdSThomas Renninger 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1380a1531acdSThomas Renninger 				     CPUFREQ_START, policy);
1381a1531acdSThomas Renninger 
138296bbbe4aSViresh Kumar 	if (!recover_policy) {
1383308b60e7SViresh Kumar 		ret = cpufreq_add_dev_interface(policy, dev);
138419d6f7ecSDave Jones 		if (ret)
13850142f9dcSAhmed S. Darwish 			goto err_out_unregister;
1386fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1387fcd7af91SViresh Kumar 				CPUFREQ_CREATE_POLICY, policy);
1388c88a1f8bSLukasz Majewski 
1389c88a1f8bSLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1390c88a1f8bSLukasz Majewski 		list_add(&policy->policy_list, &cpufreq_policy_list);
1391c88a1f8bSLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1392988bed09SViresh Kumar 	}
13938ff69732SDave Jones 
1394e18f1682SSrivatsa S. Bhat 	cpufreq_init_policy(policy);
1395e18f1682SSrivatsa S. Bhat 
139696bbbe4aSViresh Kumar 	if (!recover_policy) {
139708fd8c1cSViresh Kumar 		policy->user_policy.policy = policy->policy;
139808fd8c1cSViresh Kumar 		policy->user_policy.governor = policy->governor;
139908fd8c1cSViresh Kumar 	}
14004e97b631SViresh Kumar 	up_write(&policy->rwsem);
140108fd8c1cSViresh Kumar 
1402038c5b3eSGreg Kroah-Hartman 	kobject_uevent(&policy->kobj, KOBJ_ADD);
14037c45cf31SViresh Kumar 
14046eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
14056eed9404SViresh Kumar 
14067c45cf31SViresh Kumar 	/* Callback for handling stuff after policy is ready */
14077c45cf31SViresh Kumar 	if (cpufreq_driver->ready)
14087c45cf31SViresh Kumar 		cpufreq_driver->ready(policy);
14097c45cf31SViresh Kumar 
14102d06d8c4SDominik Brodowski 	pr_debug("initialization complete\n");
14111da177e4SLinus Torvalds 
14121da177e4SLinus Torvalds 	return 0;
14131da177e4SLinus Torvalds 
14141da177e4SLinus Torvalds err_out_unregister:
1415652ed95dSViresh Kumar err_get_freq:
14167106e02bSPrarit Bhargava 	up_write(&policy->rwsem);
14177106e02bSPrarit Bhargava 
1418da60ce9fSViresh Kumar 	if (cpufreq_driver->exit)
1419da60ce9fSViresh Kumar 		cpufreq_driver->exit(policy);
14202eaa3e2dSViresh Kumar err_set_policy_cpu:
14213654c5ccSViresh Kumar 	cpufreq_policy_free(policy, recover_policy);
14221da177e4SLinus Torvalds nomem_out:
14236eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
14246eed9404SViresh Kumar 
14251da177e4SLinus Torvalds 	return ret;
14261da177e4SLinus Torvalds }
14271da177e4SLinus Torvalds 
1428cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_prepare(struct device *dev,
142996bbbe4aSViresh Kumar 					struct subsys_interface *sif)
14301da177e4SLinus Torvalds {
1431*9591becbSViresh Kumar 	unsigned int cpu = dev->id;
1432*9591becbSViresh Kumar 	int ret = 0;
14333a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
14341da177e4SLinus Torvalds 
1435b8eed8afSViresh Kumar 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
14361da177e4SLinus Torvalds 
1437988bed09SViresh Kumar 	policy = cpufreq_cpu_get_raw(cpu);
14383a3e9e06SViresh Kumar 	if (!policy) {
1439b8eed8afSViresh Kumar 		pr_debug("%s: No cpu_data found\n", __func__);
14401da177e4SLinus Torvalds 		return -EINVAL;
14411da177e4SLinus Torvalds 	}
14421da177e4SLinus Torvalds 
14439c0ebcf7SViresh Kumar 	if (has_target()) {
14443de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
14453de9bdebSViresh Kumar 		if (ret) {
14463de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
14473de9bdebSViresh Kumar 			return ret;
14483de9bdebSViresh Kumar 		}
1449db5f2995SViresh Kumar 	}
14501da177e4SLinus Torvalds 
14514573237bSViresh Kumar 	down_write(&policy->rwsem);
1452*9591becbSViresh Kumar 	cpumask_clear_cpu(cpu, policy->cpus);
14534573237bSViresh Kumar 
1454*9591becbSViresh Kumar 	if (policy_is_inactive(policy)) {
1455*9591becbSViresh Kumar 		if (has_target())
14564573237bSViresh Kumar 			strncpy(policy->last_governor, policy->governor->name,
14574573237bSViresh Kumar 				CPUFREQ_NAME_LEN);
1458*9591becbSViresh Kumar 	} else if (cpu == policy->cpu) {
1459*9591becbSViresh Kumar 		/* Nominate new CPU */
1460*9591becbSViresh Kumar 		policy->cpu = cpumask_any(policy->cpus);
1461*9591becbSViresh Kumar 	}
14624573237bSViresh Kumar 	up_write(&policy->rwsem);
14631da177e4SLinus Torvalds 
1464*9591becbSViresh Kumar 	/* Start governor again for active policy */
1465*9591becbSViresh Kumar 	if (!policy_is_inactive(policy)) {
1466*9591becbSViresh Kumar 		if (has_target()) {
1467*9591becbSViresh Kumar 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1468*9591becbSViresh Kumar 			if (!ret)
1469*9591becbSViresh Kumar 				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
147087549141SViresh Kumar 
1471*9591becbSViresh Kumar 			if (ret)
1472*9591becbSViresh Kumar 				pr_err("%s: Failed to start governor\n", __func__);
1473*9591becbSViresh Kumar 		}
1474*9591becbSViresh Kumar 	} else if (cpufreq_driver->stop_cpu) {
1475367dc4aaSDirk Brandewie 		cpufreq_driver->stop_cpu(policy);
1476*9591becbSViresh Kumar 	}
1477b8eed8afSViresh Kumar 
1478*9591becbSViresh Kumar 	return ret;
1479cedb70afSSrivatsa S. Bhat }
1480cedb70afSSrivatsa S. Bhat 
1481cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev,
148296bbbe4aSViresh Kumar 				       struct subsys_interface *sif)
1483cedb70afSSrivatsa S. Bhat {
1484988bed09SViresh Kumar 	unsigned int cpu = dev->id;
1485cedb70afSSrivatsa S. Bhat 	int ret;
1486*9591becbSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1487cedb70afSSrivatsa S. Bhat 
1488cedb70afSSrivatsa S. Bhat 	if (!policy) {
1489cedb70afSSrivatsa S. Bhat 		pr_debug("%s: No cpu_data found\n", __func__);
1490cedb70afSSrivatsa S. Bhat 		return -EINVAL;
1491cedb70afSSrivatsa S. Bhat 	}
1492cedb70afSSrivatsa S. Bhat 
1493*9591becbSViresh Kumar 	/* Only proceed for inactive policies */
1494*9591becbSViresh Kumar 	if (!policy_is_inactive(policy))
149587549141SViresh Kumar 		return 0;
149687549141SViresh Kumar 
149787549141SViresh Kumar 	/* If cpu is last user of policy, free policy */
149887549141SViresh Kumar 	if (has_target()) {
149987549141SViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
150087549141SViresh Kumar 		if (ret) {
150187549141SViresh Kumar 			pr_err("%s: Failed to exit governor\n", __func__);
15023de9bdebSViresh Kumar 			return ret;
15033de9bdebSViresh Kumar 		}
15043de9bdebSViresh Kumar 	}
15052a998599SRafael J. Wysocki 
15068414809cSSrivatsa S. Bhat 	/*
15078414809cSSrivatsa S. Bhat 	 * Perform the ->exit() even during light-weight tear-down,
15088414809cSSrivatsa S. Bhat 	 * since this is a core component, and is essential for the
15098414809cSSrivatsa S. Bhat 	 * subsequent light-weight ->init() to succeed.
15108414809cSSrivatsa S. Bhat 	 */
15111c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->exit)
15123a3e9e06SViresh Kumar 		cpufreq_driver->exit(policy);
151327ecddc2SJacob Shin 
15143654c5ccSViresh Kumar 	/* Free the policy only if the driver is getting removed. */
151587549141SViresh Kumar 	if (sif)
15163654c5ccSViresh Kumar 		cpufreq_policy_free(policy, true);
15171da177e4SLinus Torvalds 
15181da177e4SLinus Torvalds 	return 0;
15191da177e4SLinus Torvalds }
15201da177e4SLinus Torvalds 
1521cedb70afSSrivatsa S. Bhat /**
152227a862e9SViresh Kumar  * cpufreq_remove_dev - remove a CPU device
1523cedb70afSSrivatsa S. Bhat  *
1524cedb70afSSrivatsa S. Bhat  * Removes the cpufreq interface for a CPU device.
1525cedb70afSSrivatsa S. Bhat  */
15268a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
15275a01f2e8SVenkatesh Pallipadi {
15288a25a2fdSKay Sievers 	unsigned int cpu = dev->id;
152927a862e9SViresh Kumar 	int ret;
1530ec28297aSVenki Pallipadi 
153187549141SViresh Kumar 	/*
153287549141SViresh Kumar 	 * Only possible if 'cpu' is getting physically removed now. A hotplug
153387549141SViresh Kumar 	 * notifier should have already been called and we just need to remove
153487549141SViresh Kumar 	 * link or free policy here.
153587549141SViresh Kumar 	 */
153687549141SViresh Kumar 	if (cpu_is_offline(cpu)) {
153787549141SViresh Kumar 		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
153887549141SViresh Kumar 		struct cpumask mask;
153987549141SViresh Kumar 
154087549141SViresh Kumar 		if (!policy)
1541ec28297aSVenki Pallipadi 			return 0;
1542ec28297aSVenki Pallipadi 
154387549141SViresh Kumar 		cpumask_copy(&mask, policy->related_cpus);
154487549141SViresh Kumar 		cpumask_clear_cpu(cpu, &mask);
154587549141SViresh Kumar 
154687549141SViresh Kumar 		/*
154787549141SViresh Kumar 		 * Free policy only if all policy->related_cpus are removed
154887549141SViresh Kumar 		 * physically.
154987549141SViresh Kumar 		 */
155087549141SViresh Kumar 		if (cpumask_intersects(&mask, cpu_present_mask)) {
155187549141SViresh Kumar 			remove_cpu_dev_symlink(policy, cpu);
155287549141SViresh Kumar 			return 0;
155387549141SViresh Kumar 		}
155487549141SViresh Kumar 
15553654c5ccSViresh Kumar 		cpufreq_policy_free(policy, true);
155687549141SViresh Kumar 		return 0;
155787549141SViresh Kumar 	}
155887549141SViresh Kumar 
155996bbbe4aSViresh Kumar 	ret = __cpufreq_remove_dev_prepare(dev, sif);
156027a862e9SViresh Kumar 
156127a862e9SViresh Kumar 	if (!ret)
156296bbbe4aSViresh Kumar 		ret = __cpufreq_remove_dev_finish(dev, sif);
156327a862e9SViresh Kumar 
156427a862e9SViresh Kumar 	return ret;
15655a01f2e8SVenkatesh Pallipadi }
15665a01f2e8SVenkatesh Pallipadi 
156765f27f38SDavid Howells static void handle_update(struct work_struct *work)
15681da177e4SLinus Torvalds {
156965f27f38SDavid Howells 	struct cpufreq_policy *policy =
157065f27f38SDavid Howells 		container_of(work, struct cpufreq_policy, update);
157165f27f38SDavid Howells 	unsigned int cpu = policy->cpu;
15722d06d8c4SDominik Brodowski 	pr_debug("handle_update for cpu %u called\n", cpu);
15731da177e4SLinus Torvalds 	cpufreq_update_policy(cpu);
15741da177e4SLinus Torvalds }
15751da177e4SLinus Torvalds 
15761da177e4SLinus Torvalds /**
1577bb176f7dSViresh Kumar  *	cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1578bb176f7dSViresh Kumar  *	in deep trouble.
1579a1e1dc41SViresh Kumar  *	@policy: policy managing CPUs
15801da177e4SLinus Torvalds  *	@new_freq: CPU frequency the CPU actually runs at
15811da177e4SLinus Torvalds  *
158229464f28SDave Jones  *	We adjust to current frequency first, and need to clean up later.
158329464f28SDave Jones  *	So either call to cpufreq_update_policy() or schedule handle_update()).
15841da177e4SLinus Torvalds  */
1585a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1586e08f5f5bSGautham R Shenoy 				unsigned int new_freq)
15871da177e4SLinus Torvalds {
15881da177e4SLinus Torvalds 	struct cpufreq_freqs freqs;
1589b43a7ffbSViresh Kumar 
1590e837f9b5SJoe Perches 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1591a1e1dc41SViresh Kumar 		 policy->cur, new_freq);
15921da177e4SLinus Torvalds 
1593a1e1dc41SViresh Kumar 	freqs.old = policy->cur;
15941da177e4SLinus Torvalds 	freqs.new = new_freq;
1595b43a7ffbSViresh Kumar 
15968fec051eSViresh Kumar 	cpufreq_freq_transition_begin(policy, &freqs);
15978fec051eSViresh Kumar 	cpufreq_freq_transition_end(policy, &freqs, 0);
15981da177e4SLinus Torvalds }
15991da177e4SLinus Torvalds 
16001da177e4SLinus Torvalds /**
16014ab70df4SDhaval Giani  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
160295235ca2SVenkatesh Pallipadi  * @cpu: CPU number
160395235ca2SVenkatesh Pallipadi  *
160495235ca2SVenkatesh Pallipadi  * This is the last known freq, without actually getting it from the driver.
160595235ca2SVenkatesh Pallipadi  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
160695235ca2SVenkatesh Pallipadi  */
160795235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu)
160895235ca2SVenkatesh Pallipadi {
16099e21ba8bSDirk Brandewie 	struct cpufreq_policy *policy;
1610e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
161195235ca2SVenkatesh Pallipadi 
16121c3d85ddSRafael J. Wysocki 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
16131c3d85ddSRafael J. Wysocki 		return cpufreq_driver->get(cpu);
16149e21ba8bSDirk Brandewie 
16159e21ba8bSDirk Brandewie 	policy = cpufreq_cpu_get(cpu);
161695235ca2SVenkatesh Pallipadi 	if (policy) {
1617e08f5f5bSGautham R Shenoy 		ret_freq = policy->cur;
161895235ca2SVenkatesh Pallipadi 		cpufreq_cpu_put(policy);
161995235ca2SVenkatesh Pallipadi 	}
162095235ca2SVenkatesh Pallipadi 
16214d34a67dSDave Jones 	return ret_freq;
162295235ca2SVenkatesh Pallipadi }
162395235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get);
162495235ca2SVenkatesh Pallipadi 
16253d737108SJesse Barnes /**
16263d737108SJesse Barnes  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
16273d737108SJesse Barnes  * @cpu: CPU number
16283d737108SJesse Barnes  *
16293d737108SJesse Barnes  * Just return the max possible frequency for a given CPU.
16303d737108SJesse Barnes  */
16313d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu)
16323d737108SJesse Barnes {
16333d737108SJesse Barnes 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16343d737108SJesse Barnes 	unsigned int ret_freq = 0;
16353d737108SJesse Barnes 
16363d737108SJesse Barnes 	if (policy) {
16373d737108SJesse Barnes 		ret_freq = policy->max;
16383d737108SJesse Barnes 		cpufreq_cpu_put(policy);
16393d737108SJesse Barnes 	}
16403d737108SJesse Barnes 
16413d737108SJesse Barnes 	return ret_freq;
16423d737108SJesse Barnes }
16433d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max);
16443d737108SJesse Barnes 
1645d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
16461da177e4SLinus Torvalds {
1647e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
16481da177e4SLinus Torvalds 
16491c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver->get)
16504d34a67dSDave Jones 		return ret_freq;
16511da177e4SLinus Torvalds 
1652d92d50a4SViresh Kumar 	ret_freq = cpufreq_driver->get(policy->cpu);
16531da177e4SLinus Torvalds 
165411e584cfSViresh Kumar 	/* Updating inactive policies is invalid, so avoid doing that. */
165511e584cfSViresh Kumar 	if (unlikely(policy_is_inactive(policy)))
165611e584cfSViresh Kumar 		return ret_freq;
165711e584cfSViresh Kumar 
1658e08f5f5bSGautham R Shenoy 	if (ret_freq && policy->cur &&
16591c3d85ddSRafael J. Wysocki 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1660e08f5f5bSGautham R Shenoy 		/* verify no discrepancy between actual and
1661e08f5f5bSGautham R Shenoy 					saved value exists */
1662e08f5f5bSGautham R Shenoy 		if (unlikely(ret_freq != policy->cur)) {
1663a1e1dc41SViresh Kumar 			cpufreq_out_of_sync(policy, ret_freq);
16641da177e4SLinus Torvalds 			schedule_work(&policy->update);
16651da177e4SLinus Torvalds 		}
16661da177e4SLinus Torvalds 	}
16671da177e4SLinus Torvalds 
16684d34a67dSDave Jones 	return ret_freq;
16695a01f2e8SVenkatesh Pallipadi }
16701da177e4SLinus Torvalds 
16715a01f2e8SVenkatesh Pallipadi /**
16725a01f2e8SVenkatesh Pallipadi  * cpufreq_get - get the current CPU frequency (in kHz)
16735a01f2e8SVenkatesh Pallipadi  * @cpu: CPU number
16745a01f2e8SVenkatesh Pallipadi  *
16755a01f2e8SVenkatesh Pallipadi  * Get the CPU current (static) CPU frequency
16765a01f2e8SVenkatesh Pallipadi  */
16775a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu)
16785a01f2e8SVenkatesh Pallipadi {
1679999976e0SAaron Plattner 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16805a01f2e8SVenkatesh Pallipadi 	unsigned int ret_freq = 0;
16815a01f2e8SVenkatesh Pallipadi 
1682999976e0SAaron Plattner 	if (policy) {
1683ad7722daSviresh kumar 		down_read(&policy->rwsem);
1684d92d50a4SViresh Kumar 		ret_freq = __cpufreq_get(policy);
1685ad7722daSviresh kumar 		up_read(&policy->rwsem);
1686999976e0SAaron Plattner 
1687999976e0SAaron Plattner 		cpufreq_cpu_put(policy);
1688999976e0SAaron Plattner 	}
16896eed9404SViresh Kumar 
16904d34a67dSDave Jones 	return ret_freq;
16911da177e4SLinus Torvalds }
16921da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get);
16931da177e4SLinus Torvalds 
16948a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = {
16958a25a2fdSKay Sievers 	.name		= "cpufreq",
16968a25a2fdSKay Sievers 	.subsys		= &cpu_subsys,
16978a25a2fdSKay Sievers 	.add_dev	= cpufreq_add_dev,
16988a25a2fdSKay Sievers 	.remove_dev	= cpufreq_remove_dev,
1699e00e56dfSRafael J. Wysocki };
1700e00e56dfSRafael J. Wysocki 
1701e28867eaSViresh Kumar /*
1702e28867eaSViresh Kumar  * In case platform wants some specific frequency to be configured
1703e28867eaSViresh Kumar  * during suspend..
170442d4dc3fSBenjamin Herrenschmidt  */
1705e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy)
170642d4dc3fSBenjamin Herrenschmidt {
1707e28867eaSViresh Kumar 	int ret;
17084bc5d341SDave Jones 
1709e28867eaSViresh Kumar 	if (!policy->suspend_freq) {
1710e28867eaSViresh Kumar 		pr_err("%s: suspend_freq can't be zero\n", __func__);
1711e28867eaSViresh Kumar 		return -EINVAL;
171242d4dc3fSBenjamin Herrenschmidt 	}
171342d4dc3fSBenjamin Herrenschmidt 
1714e28867eaSViresh Kumar 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1715e28867eaSViresh Kumar 			policy->suspend_freq);
1716e28867eaSViresh Kumar 
1717e28867eaSViresh Kumar 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1718e28867eaSViresh Kumar 			CPUFREQ_RELATION_H);
1719e28867eaSViresh Kumar 	if (ret)
1720e28867eaSViresh Kumar 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1721e28867eaSViresh Kumar 				__func__, policy->suspend_freq, ret);
1722e28867eaSViresh Kumar 
1723c9060494SDave Jones 	return ret;
172442d4dc3fSBenjamin Herrenschmidt }
1725e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend);
172642d4dc3fSBenjamin Herrenschmidt 
172742d4dc3fSBenjamin Herrenschmidt /**
17282f0aea93SViresh Kumar  * cpufreq_suspend() - Suspend CPUFreq governors
17291da177e4SLinus Torvalds  *
17302f0aea93SViresh Kumar  * Called during system wide Suspend/Hibernate cycles for suspending governors
17312f0aea93SViresh Kumar  * as some platforms can't change frequency after this point in suspend cycle.
17322f0aea93SViresh Kumar  * Because some of the devices (like: i2c, regulators, etc) they use for
17332f0aea93SViresh Kumar  * changing frequency are suspended quickly after this point.
17341da177e4SLinus Torvalds  */
17352f0aea93SViresh Kumar void cpufreq_suspend(void)
17361da177e4SLinus Torvalds {
17373a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
17381da177e4SLinus Torvalds 
17392f0aea93SViresh Kumar 	if (!cpufreq_driver)
1740e00e56dfSRafael J. Wysocki 		return;
17411da177e4SLinus Torvalds 
17422f0aea93SViresh Kumar 	if (!has_target())
1743b1b12babSViresh Kumar 		goto suspend;
17441da177e4SLinus Torvalds 
17452f0aea93SViresh Kumar 	pr_debug("%s: Suspending Governors\n", __func__);
17462f0aea93SViresh Kumar 
1747f963735aSViresh Kumar 	for_each_active_policy(policy) {
17482f0aea93SViresh Kumar 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
17492f0aea93SViresh Kumar 			pr_err("%s: Failed to stop governor for policy: %p\n",
17502f0aea93SViresh Kumar 				__func__, policy);
17512f0aea93SViresh Kumar 		else if (cpufreq_driver->suspend
17522f0aea93SViresh Kumar 		    && cpufreq_driver->suspend(policy))
17532f0aea93SViresh Kumar 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
17542f0aea93SViresh Kumar 				policy);
17551da177e4SLinus Torvalds 	}
1756b1b12babSViresh Kumar 
1757b1b12babSViresh Kumar suspend:
1758b1b12babSViresh Kumar 	cpufreq_suspended = true;
17591da177e4SLinus Torvalds }
17601da177e4SLinus Torvalds 
17611da177e4SLinus Torvalds /**
17622f0aea93SViresh Kumar  * cpufreq_resume() - Resume CPUFreq governors
17631da177e4SLinus Torvalds  *
17642f0aea93SViresh Kumar  * Called during system wide Suspend/Hibernate cycle for resuming governors that
17652f0aea93SViresh Kumar  * are suspended with cpufreq_suspend().
17661da177e4SLinus Torvalds  */
17672f0aea93SViresh Kumar void cpufreq_resume(void)
17681da177e4SLinus Torvalds {
17691da177e4SLinus Torvalds 	struct cpufreq_policy *policy;
17701da177e4SLinus Torvalds 
17712f0aea93SViresh Kumar 	if (!cpufreq_driver)
17721da177e4SLinus Torvalds 		return;
17731da177e4SLinus Torvalds 
17748e30444eSLan Tianyu 	cpufreq_suspended = false;
17758e30444eSLan Tianyu 
17762f0aea93SViresh Kumar 	if (!has_target())
17772f0aea93SViresh Kumar 		return;
17781da177e4SLinus Torvalds 
17792f0aea93SViresh Kumar 	pr_debug("%s: Resuming Governors\n", __func__);
17802f0aea93SViresh Kumar 
1781f963735aSViresh Kumar 	for_each_active_policy(policy) {
17820c5aa405SViresh Kumar 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
17830c5aa405SViresh Kumar 			pr_err("%s: Failed to resume driver: %p\n", __func__,
17840c5aa405SViresh Kumar 				policy);
17850c5aa405SViresh Kumar 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
17862f0aea93SViresh Kumar 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
17872f0aea93SViresh Kumar 			pr_err("%s: Failed to start governor for policy: %p\n",
17882f0aea93SViresh Kumar 				__func__, policy);
1789c75de0acSViresh Kumar 	}
17902f0aea93SViresh Kumar 
17912f0aea93SViresh Kumar 	/*
1792c75de0acSViresh Kumar 	 * schedule call cpufreq_update_policy() for first-online CPU, as that
1793c75de0acSViresh Kumar 	 * wouldn't be hotplugged-out on suspend. It will verify that the
1794c75de0acSViresh Kumar 	 * current freq is in sync with what we believe it to be.
17952f0aea93SViresh Kumar 	 */
1796c75de0acSViresh Kumar 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1797c75de0acSViresh Kumar 	if (WARN_ON(!policy))
1798c75de0acSViresh Kumar 		return;
1799c75de0acSViresh Kumar 
18003a3e9e06SViresh Kumar 	schedule_work(&policy->update);
18011da177e4SLinus Torvalds }
18021da177e4SLinus Torvalds 
18039d95046eSBorislav Petkov /**
18049d95046eSBorislav Petkov  *	cpufreq_get_current_driver - return current driver's name
18059d95046eSBorislav Petkov  *
18069d95046eSBorislav Petkov  *	Return the name string of the currently loaded cpufreq driver
18079d95046eSBorislav Petkov  *	or NULL, if none.
18089d95046eSBorislav Petkov  */
18099d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void)
18109d95046eSBorislav Petkov {
18111c3d85ddSRafael J. Wysocki 	if (cpufreq_driver)
18121c3d85ddSRafael J. Wysocki 		return cpufreq_driver->name;
18131c3d85ddSRafael J. Wysocki 
18141c3d85ddSRafael J. Wysocki 	return NULL;
18159d95046eSBorislav Petkov }
18169d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
18171da177e4SLinus Torvalds 
181851315cdfSThomas Petazzoni /**
181951315cdfSThomas Petazzoni  *	cpufreq_get_driver_data - return current driver data
182051315cdfSThomas Petazzoni  *
182151315cdfSThomas Petazzoni  *	Return the private data of the currently loaded cpufreq
182251315cdfSThomas Petazzoni  *	driver, or NULL if no cpufreq driver is loaded.
182351315cdfSThomas Petazzoni  */
182451315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void)
182551315cdfSThomas Petazzoni {
182651315cdfSThomas Petazzoni 	if (cpufreq_driver)
182751315cdfSThomas Petazzoni 		return cpufreq_driver->driver_data;
182851315cdfSThomas Petazzoni 
182951315cdfSThomas Petazzoni 	return NULL;
183051315cdfSThomas Petazzoni }
183151315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
183251315cdfSThomas Petazzoni 
18331da177e4SLinus Torvalds /*********************************************************************
18341da177e4SLinus Torvalds  *                     NOTIFIER LISTS INTERFACE                      *
18351da177e4SLinus Torvalds  *********************************************************************/
18361da177e4SLinus Torvalds 
18371da177e4SLinus Torvalds /**
18381da177e4SLinus Torvalds  *	cpufreq_register_notifier - register a driver with cpufreq
18391da177e4SLinus Torvalds  *	@nb: notifier function to register
18401da177e4SLinus Torvalds  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18411da177e4SLinus Torvalds  *
18421da177e4SLinus Torvalds  *	Add a driver to one of two lists: either a list of drivers that
18431da177e4SLinus Torvalds  *      are notified about clock rate changes (once before and once after
18441da177e4SLinus Torvalds  *      the transition), or a list of drivers that are notified about
18451da177e4SLinus Torvalds  *      changes in cpufreq policy.
18461da177e4SLinus Torvalds  *
18471da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1848e041c683SAlan Stern  *	blocking_notifier_chain_register.
18491da177e4SLinus Torvalds  */
18501da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
18511da177e4SLinus Torvalds {
18521da177e4SLinus Torvalds 	int ret;
18531da177e4SLinus Torvalds 
1854d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1855d5aaffa9SDirk Brandewie 		return -EINVAL;
1856d5aaffa9SDirk Brandewie 
185774212ca4SCesar Eduardo Barros 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
185874212ca4SCesar Eduardo Barros 
18591da177e4SLinus Torvalds 	switch (list) {
18601da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1861b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_register(
1862e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18631da177e4SLinus Torvalds 		break;
18641da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1865e041c683SAlan Stern 		ret = blocking_notifier_chain_register(
1866e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18671da177e4SLinus Torvalds 		break;
18681da177e4SLinus Torvalds 	default:
18691da177e4SLinus Torvalds 		ret = -EINVAL;
18701da177e4SLinus Torvalds 	}
18711da177e4SLinus Torvalds 
18721da177e4SLinus Torvalds 	return ret;
18731da177e4SLinus Torvalds }
18741da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier);
18751da177e4SLinus Torvalds 
18761da177e4SLinus Torvalds /**
18771da177e4SLinus Torvalds  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
18781da177e4SLinus Torvalds  *	@nb: notifier block to be unregistered
18791da177e4SLinus Torvalds  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18801da177e4SLinus Torvalds  *
18811da177e4SLinus Torvalds  *	Remove a driver from the CPU frequency notifier list.
18821da177e4SLinus Torvalds  *
18831da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1884e041c683SAlan Stern  *	blocking_notifier_chain_unregister.
18851da177e4SLinus Torvalds  */
18861da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
18871da177e4SLinus Torvalds {
18881da177e4SLinus Torvalds 	int ret;
18891da177e4SLinus Torvalds 
1890d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1891d5aaffa9SDirk Brandewie 		return -EINVAL;
1892d5aaffa9SDirk Brandewie 
18931da177e4SLinus Torvalds 	switch (list) {
18941da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1895b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_unregister(
1896e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18971da177e4SLinus Torvalds 		break;
18981da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1899e041c683SAlan Stern 		ret = blocking_notifier_chain_unregister(
1900e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
19011da177e4SLinus Torvalds 		break;
19021da177e4SLinus Torvalds 	default:
19031da177e4SLinus Torvalds 		ret = -EINVAL;
19041da177e4SLinus Torvalds 	}
19051da177e4SLinus Torvalds 
19061da177e4SLinus Torvalds 	return ret;
19071da177e4SLinus Torvalds }
19081da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier);
19091da177e4SLinus Torvalds 
19101da177e4SLinus Torvalds 
19111da177e4SLinus Torvalds /*********************************************************************
19121da177e4SLinus Torvalds  *                              GOVERNORS                            *
19131da177e4SLinus Torvalds  *********************************************************************/
19141da177e4SLinus Torvalds 
19151c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */
19161c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy,
19171c03a2d0SViresh Kumar 				 struct cpufreq_freqs *freqs, int index)
19181c03a2d0SViresh Kumar {
19191c03a2d0SViresh Kumar 	int ret;
19201c03a2d0SViresh Kumar 
19211c03a2d0SViresh Kumar 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
19221c03a2d0SViresh Kumar 
19231c03a2d0SViresh Kumar 	/* We don't need to switch to intermediate freq */
19241c03a2d0SViresh Kumar 	if (!freqs->new)
19251c03a2d0SViresh Kumar 		return 0;
19261c03a2d0SViresh Kumar 
19271c03a2d0SViresh Kumar 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
19281c03a2d0SViresh Kumar 		 __func__, policy->cpu, freqs->old, freqs->new);
19291c03a2d0SViresh Kumar 
19301c03a2d0SViresh Kumar 	cpufreq_freq_transition_begin(policy, freqs);
19311c03a2d0SViresh Kumar 	ret = cpufreq_driver->target_intermediate(policy, index);
19321c03a2d0SViresh Kumar 	cpufreq_freq_transition_end(policy, freqs, ret);
19331c03a2d0SViresh Kumar 
19341c03a2d0SViresh Kumar 	if (ret)
19351c03a2d0SViresh Kumar 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
19361c03a2d0SViresh Kumar 		       __func__, ret);
19371c03a2d0SViresh Kumar 
19381c03a2d0SViresh Kumar 	return ret;
19391c03a2d0SViresh Kumar }
19401c03a2d0SViresh Kumar 
19418d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy,
19428d65775dSViresh Kumar 			  struct cpufreq_frequency_table *freq_table, int index)
19438d65775dSViresh Kumar {
19441c03a2d0SViresh Kumar 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
19451c03a2d0SViresh Kumar 	unsigned int intermediate_freq = 0;
19468d65775dSViresh Kumar 	int retval = -EINVAL;
19478d65775dSViresh Kumar 	bool notify;
19488d65775dSViresh Kumar 
19498d65775dSViresh Kumar 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
19508d65775dSViresh Kumar 	if (notify) {
19511c03a2d0SViresh Kumar 		/* Handle switching to intermediate frequency */
19521c03a2d0SViresh Kumar 		if (cpufreq_driver->get_intermediate) {
19531c03a2d0SViresh Kumar 			retval = __target_intermediate(policy, &freqs, index);
19541c03a2d0SViresh Kumar 			if (retval)
19551c03a2d0SViresh Kumar 				return retval;
19568d65775dSViresh Kumar 
19571c03a2d0SViresh Kumar 			intermediate_freq = freqs.new;
19581c03a2d0SViresh Kumar 			/* Set old freq to intermediate */
19591c03a2d0SViresh Kumar 			if (intermediate_freq)
19601c03a2d0SViresh Kumar 				freqs.old = freqs.new;
19611c03a2d0SViresh Kumar 		}
19621c03a2d0SViresh Kumar 
19631c03a2d0SViresh Kumar 		freqs.new = freq_table[index].frequency;
19648d65775dSViresh Kumar 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
19658d65775dSViresh Kumar 			 __func__, policy->cpu, freqs.old, freqs.new);
19668d65775dSViresh Kumar 
19678d65775dSViresh Kumar 		cpufreq_freq_transition_begin(policy, &freqs);
19688d65775dSViresh Kumar 	}
19698d65775dSViresh Kumar 
19708d65775dSViresh Kumar 	retval = cpufreq_driver->target_index(policy, index);
19718d65775dSViresh Kumar 	if (retval)
19728d65775dSViresh Kumar 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
19738d65775dSViresh Kumar 		       retval);
19748d65775dSViresh Kumar 
19751c03a2d0SViresh Kumar 	if (notify) {
19768d65775dSViresh Kumar 		cpufreq_freq_transition_end(policy, &freqs, retval);
19778d65775dSViresh Kumar 
19781c03a2d0SViresh Kumar 		/*
19791c03a2d0SViresh Kumar 		 * Failed after setting to intermediate freq? Driver should have
19801c03a2d0SViresh Kumar 		 * reverted back to initial frequency and so should we. Check
19811c03a2d0SViresh Kumar 		 * here for intermediate_freq instead of get_intermediate, in
198258405af6SShailendra Verma 		 * case we haven't switched to intermediate freq at all.
19831c03a2d0SViresh Kumar 		 */
19841c03a2d0SViresh Kumar 		if (unlikely(retval && intermediate_freq)) {
19851c03a2d0SViresh Kumar 			freqs.old = intermediate_freq;
19861c03a2d0SViresh Kumar 			freqs.new = policy->restore_freq;
19871c03a2d0SViresh Kumar 			cpufreq_freq_transition_begin(policy, &freqs);
19881c03a2d0SViresh Kumar 			cpufreq_freq_transition_end(policy, &freqs, 0);
19891c03a2d0SViresh Kumar 		}
19901c03a2d0SViresh Kumar 	}
19911c03a2d0SViresh Kumar 
19928d65775dSViresh Kumar 	return retval;
19938d65775dSViresh Kumar }
19948d65775dSViresh Kumar 
19951da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy,
19961da177e4SLinus Torvalds 			    unsigned int target_freq,
19971da177e4SLinus Torvalds 			    unsigned int relation)
19981da177e4SLinus Torvalds {
19997249924eSViresh Kumar 	unsigned int old_target_freq = target_freq;
20008d65775dSViresh Kumar 	int retval = -EINVAL;
2001c32b6b8eSAshok Raj 
2002a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2003a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2004a7b422cdSKonrad Rzeszutek Wilk 
20057249924eSViresh Kumar 	/* Make sure that target_freq is within supported range */
20067249924eSViresh Kumar 	if (target_freq > policy->max)
20077249924eSViresh Kumar 		target_freq = policy->max;
20087249924eSViresh Kumar 	if (target_freq < policy->min)
20097249924eSViresh Kumar 		target_freq = policy->min;
20107249924eSViresh Kumar 
20117249924eSViresh Kumar 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
20127249924eSViresh Kumar 		 policy->cpu, target_freq, relation, old_target_freq);
20135a1c0228SViresh Kumar 
20149c0ebcf7SViresh Kumar 	/*
20159c0ebcf7SViresh Kumar 	 * This might look like a redundant call as we are checking it again
20169c0ebcf7SViresh Kumar 	 * after finding index. But it is left intentionally for cases where
20179c0ebcf7SViresh Kumar 	 * exactly same freq is called again and so we can save on few function
20189c0ebcf7SViresh Kumar 	 * calls.
20199c0ebcf7SViresh Kumar 	 */
20205a1c0228SViresh Kumar 	if (target_freq == policy->cur)
20215a1c0228SViresh Kumar 		return 0;
20225a1c0228SViresh Kumar 
20231c03a2d0SViresh Kumar 	/* Save last value to restore later on errors */
20241c03a2d0SViresh Kumar 	policy->restore_freq = policy->cur;
20251c03a2d0SViresh Kumar 
20261c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->target)
20271c3d85ddSRafael J. Wysocki 		retval = cpufreq_driver->target(policy, target_freq, relation);
20289c0ebcf7SViresh Kumar 	else if (cpufreq_driver->target_index) {
20299c0ebcf7SViresh Kumar 		struct cpufreq_frequency_table *freq_table;
20309c0ebcf7SViresh Kumar 		int index;
203190d45d17SAshok Raj 
20329c0ebcf7SViresh Kumar 		freq_table = cpufreq_frequency_get_table(policy->cpu);
20339c0ebcf7SViresh Kumar 		if (unlikely(!freq_table)) {
20349c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find freq_table\n", __func__);
20359c0ebcf7SViresh Kumar 			goto out;
20369c0ebcf7SViresh Kumar 		}
20379c0ebcf7SViresh Kumar 
20389c0ebcf7SViresh Kumar 		retval = cpufreq_frequency_table_target(policy, freq_table,
20399c0ebcf7SViresh Kumar 				target_freq, relation, &index);
20409c0ebcf7SViresh Kumar 		if (unlikely(retval)) {
20419c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find matching freq\n", __func__);
20429c0ebcf7SViresh Kumar 			goto out;
20439c0ebcf7SViresh Kumar 		}
20449c0ebcf7SViresh Kumar 
2045d4019f0aSViresh Kumar 		if (freq_table[index].frequency == policy->cur) {
20469c0ebcf7SViresh Kumar 			retval = 0;
2047d4019f0aSViresh Kumar 			goto out;
2048d4019f0aSViresh Kumar 		}
2049d4019f0aSViresh Kumar 
20508d65775dSViresh Kumar 		retval = __target_index(policy, freq_table, index);
20519c0ebcf7SViresh Kumar 	}
20529c0ebcf7SViresh Kumar 
20539c0ebcf7SViresh Kumar out:
20541da177e4SLinus Torvalds 	return retval;
20551da177e4SLinus Torvalds }
20561da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
20571da177e4SLinus Torvalds 
20581da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy,
20591da177e4SLinus Torvalds 			  unsigned int target_freq,
20601da177e4SLinus Torvalds 			  unsigned int relation)
20611da177e4SLinus Torvalds {
2062f1829e4aSJulia Lawall 	int ret = -EINVAL;
20631da177e4SLinus Torvalds 
2064ad7722daSviresh kumar 	down_write(&policy->rwsem);
20651da177e4SLinus Torvalds 
20661da177e4SLinus Torvalds 	ret = __cpufreq_driver_target(policy, target_freq, relation);
20671da177e4SLinus Torvalds 
2068ad7722daSviresh kumar 	up_write(&policy->rwsem);
20691da177e4SLinus Torvalds 
20701da177e4SLinus Torvalds 	return ret;
20711da177e4SLinus Torvalds }
20721da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target);
20731da177e4SLinus Torvalds 
2074e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy,
2075e08f5f5bSGautham R Shenoy 					unsigned int event)
20761da177e4SLinus Torvalds {
2077cc993cabSDave Jones 	int ret;
20786afde10cSThomas Renninger 
20796afde10cSThomas Renninger 	/* Only must be defined when default governor is known to have latency
20806afde10cSThomas Renninger 	   restrictions, like e.g. conservative or ondemand.
20816afde10cSThomas Renninger 	   That this is the case is already ensured in Kconfig
20826afde10cSThomas Renninger 	*/
20836afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
20846afde10cSThomas Renninger 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
20856afde10cSThomas Renninger #else
20866afde10cSThomas Renninger 	struct cpufreq_governor *gov = NULL;
20876afde10cSThomas Renninger #endif
20881c256245SThomas Renninger 
20892f0aea93SViresh Kumar 	/* Don't start any governor operations if we are entering suspend */
20902f0aea93SViresh Kumar 	if (cpufreq_suspended)
20912f0aea93SViresh Kumar 		return 0;
2092cb57720bSEthan Zhao 	/*
2093cb57720bSEthan Zhao 	 * Governor might not be initiated here if ACPI _PPC changed
2094cb57720bSEthan Zhao 	 * notification happened, so check it.
2095cb57720bSEthan Zhao 	 */
2096cb57720bSEthan Zhao 	if (!policy->governor)
2097cb57720bSEthan Zhao 		return -EINVAL;
20982f0aea93SViresh Kumar 
20991c256245SThomas Renninger 	if (policy->governor->max_transition_latency &&
21001c256245SThomas Renninger 	    policy->cpuinfo.transition_latency >
21011c256245SThomas Renninger 	    policy->governor->max_transition_latency) {
21026afde10cSThomas Renninger 		if (!gov)
21036afde10cSThomas Renninger 			return -EINVAL;
21046afde10cSThomas Renninger 		else {
2105e837f9b5SJoe Perches 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2106e837f9b5SJoe Perches 				policy->governor->name, gov->name);
21071c256245SThomas Renninger 			policy->governor = gov;
21081c256245SThomas Renninger 		}
21096afde10cSThomas Renninger 	}
21101da177e4SLinus Torvalds 
2111fe492f3fSViresh Kumar 	if (event == CPUFREQ_GOV_POLICY_INIT)
21121da177e4SLinus Torvalds 		if (!try_module_get(policy->governor->owner))
21131da177e4SLinus Torvalds 			return -EINVAL;
21141da177e4SLinus Torvalds 
21152d06d8c4SDominik Brodowski 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2116e08f5f5bSGautham R Shenoy 		 policy->cpu, event);
211795731ebbSXiaoguang Chen 
211895731ebbSXiaoguang Chen 	mutex_lock(&cpufreq_governor_lock);
211956d07db2SSrivatsa S. Bhat 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2120f73d3933SViresh Kumar 	    || (!policy->governor_enabled
2121f73d3933SViresh Kumar 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
212295731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
212395731ebbSXiaoguang Chen 		return -EBUSY;
212495731ebbSXiaoguang Chen 	}
212595731ebbSXiaoguang Chen 
212695731ebbSXiaoguang Chen 	if (event == CPUFREQ_GOV_STOP)
212795731ebbSXiaoguang Chen 		policy->governor_enabled = false;
212895731ebbSXiaoguang Chen 	else if (event == CPUFREQ_GOV_START)
212995731ebbSXiaoguang Chen 		policy->governor_enabled = true;
213095731ebbSXiaoguang Chen 
213195731ebbSXiaoguang Chen 	mutex_unlock(&cpufreq_governor_lock);
213295731ebbSXiaoguang Chen 
21331da177e4SLinus Torvalds 	ret = policy->governor->governor(policy, event);
21341da177e4SLinus Torvalds 
21354d5dcc42SViresh Kumar 	if (!ret) {
21364d5dcc42SViresh Kumar 		if (event == CPUFREQ_GOV_POLICY_INIT)
21378e53695fSViresh Kumar 			policy->governor->initialized++;
21384d5dcc42SViresh Kumar 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
21398e53695fSViresh Kumar 			policy->governor->initialized--;
214095731ebbSXiaoguang Chen 	} else {
214195731ebbSXiaoguang Chen 		/* Restore original values */
214295731ebbSXiaoguang Chen 		mutex_lock(&cpufreq_governor_lock);
214395731ebbSXiaoguang Chen 		if (event == CPUFREQ_GOV_STOP)
214495731ebbSXiaoguang Chen 			policy->governor_enabled = true;
214595731ebbSXiaoguang Chen 		else if (event == CPUFREQ_GOV_START)
214695731ebbSXiaoguang Chen 			policy->governor_enabled = false;
214795731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
21484d5dcc42SViresh Kumar 	}
2149b394058fSViresh Kumar 
2150fe492f3fSViresh Kumar 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2151fe492f3fSViresh Kumar 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
21521da177e4SLinus Torvalds 		module_put(policy->governor->owner);
21531da177e4SLinus Torvalds 
21541da177e4SLinus Torvalds 	return ret;
21551da177e4SLinus Torvalds }
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor)
21581da177e4SLinus Torvalds {
21593bcb09a3SJeremy Fitzhardinge 	int err;
21601da177e4SLinus Torvalds 
21611da177e4SLinus Torvalds 	if (!governor)
21621da177e4SLinus Torvalds 		return -EINVAL;
21631da177e4SLinus Torvalds 
2164a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2165a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2166a7b422cdSKonrad Rzeszutek Wilk 
21673fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21681da177e4SLinus Torvalds 
2169b394058fSViresh Kumar 	governor->initialized = 0;
21703bcb09a3SJeremy Fitzhardinge 	err = -EBUSY;
217142f91fa1SViresh Kumar 	if (!find_governor(governor->name)) {
21723bcb09a3SJeremy Fitzhardinge 		err = 0;
21731da177e4SLinus Torvalds 		list_add(&governor->governor_list, &cpufreq_governor_list);
21743bcb09a3SJeremy Fitzhardinge 	}
21751da177e4SLinus Torvalds 
21763fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21773bcb09a3SJeremy Fitzhardinge 	return err;
21781da177e4SLinus Torvalds }
21791da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor);
21801da177e4SLinus Torvalds 
21811da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor)
21821da177e4SLinus Torvalds {
21834573237bSViresh Kumar 	struct cpufreq_policy *policy;
21844573237bSViresh Kumar 	unsigned long flags;
218590e41bacSPrarit Bhargava 
21861da177e4SLinus Torvalds 	if (!governor)
21871da177e4SLinus Torvalds 		return;
21881da177e4SLinus Torvalds 
2189a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2190a7b422cdSKonrad Rzeszutek Wilk 		return;
2191a7b422cdSKonrad Rzeszutek Wilk 
21924573237bSViresh Kumar 	/* clear last_governor for all inactive policies */
21934573237bSViresh Kumar 	read_lock_irqsave(&cpufreq_driver_lock, flags);
21944573237bSViresh Kumar 	for_each_inactive_policy(policy) {
219518bf3a12SViresh Kumar 		if (!strcmp(policy->last_governor, governor->name)) {
219618bf3a12SViresh Kumar 			policy->governor = NULL;
21974573237bSViresh Kumar 			strcpy(policy->last_governor, "\0");
219890e41bacSPrarit Bhargava 		}
219918bf3a12SViresh Kumar 	}
22004573237bSViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
220190e41bacSPrarit Bhargava 
22023fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
22031da177e4SLinus Torvalds 	list_del(&governor->governor_list);
22043fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
22051da177e4SLinus Torvalds 	return;
22061da177e4SLinus Torvalds }
22071da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
22081da177e4SLinus Torvalds 
22091da177e4SLinus Torvalds 
22101da177e4SLinus Torvalds /*********************************************************************
22111da177e4SLinus Torvalds  *                          POLICY INTERFACE                         *
22121da177e4SLinus Torvalds  *********************************************************************/
22131da177e4SLinus Torvalds 
22141da177e4SLinus Torvalds /**
22151da177e4SLinus Torvalds  * cpufreq_get_policy - get the current cpufreq_policy
221629464f28SDave Jones  * @policy: struct cpufreq_policy into which the current cpufreq_policy
221729464f28SDave Jones  *	is written
22181da177e4SLinus Torvalds  *
22191da177e4SLinus Torvalds  * Reads the current cpufreq policy.
22201da177e4SLinus Torvalds  */
22211da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
22221da177e4SLinus Torvalds {
22231da177e4SLinus Torvalds 	struct cpufreq_policy *cpu_policy;
22241da177e4SLinus Torvalds 	if (!policy)
22251da177e4SLinus Torvalds 		return -EINVAL;
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 	cpu_policy = cpufreq_cpu_get(cpu);
22281da177e4SLinus Torvalds 	if (!cpu_policy)
22291da177e4SLinus Torvalds 		return -EINVAL;
22301da177e4SLinus Torvalds 
2231d5b73cd8SViresh Kumar 	memcpy(policy, cpu_policy, sizeof(*policy));
22321da177e4SLinus Torvalds 
22331da177e4SLinus Torvalds 	cpufreq_cpu_put(cpu_policy);
22341da177e4SLinus Torvalds 	return 0;
22351da177e4SLinus Torvalds }
22361da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy);
22371da177e4SLinus Torvalds 
2238153d7f3fSArjan van de Ven /*
2239037ce839SViresh Kumar  * policy : current policy.
2240037ce839SViresh Kumar  * new_policy: policy to be set.
2241153d7f3fSArjan van de Ven  */
2242037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
22433a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy)
22441da177e4SLinus Torvalds {
2245d9a789c7SRafael J. Wysocki 	struct cpufreq_governor *old_gov;
2246d9a789c7SRafael J. Wysocki 	int ret;
22471da177e4SLinus Torvalds 
2248e837f9b5SJoe Perches 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2249e837f9b5SJoe Perches 		 new_policy->cpu, new_policy->min, new_policy->max);
22501da177e4SLinus Torvalds 
2251d5b73cd8SViresh Kumar 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
22521da177e4SLinus Torvalds 
2253d9a789c7SRafael J. Wysocki 	if (new_policy->min > policy->max || new_policy->max < policy->min)
2254d9a789c7SRafael J. Wysocki 		return -EINVAL;
22559c9a43edSMattia Dongili 
22561da177e4SLinus Torvalds 	/* verify the cpu speed can be set within this limit */
22573a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
22581da177e4SLinus Torvalds 	if (ret)
2259d9a789c7SRafael J. Wysocki 		return ret;
22601da177e4SLinus Torvalds 
22611da177e4SLinus Torvalds 	/* adjust if necessary - all reasons */
2262e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22633a3e9e06SViresh Kumar 			CPUFREQ_ADJUST, new_policy);
22641da177e4SLinus Torvalds 
22651da177e4SLinus Torvalds 	/* adjust if necessary - hardware incompatibility*/
2266e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22673a3e9e06SViresh Kumar 			CPUFREQ_INCOMPATIBLE, new_policy);
22681da177e4SLinus Torvalds 
2269bb176f7dSViresh Kumar 	/*
2270bb176f7dSViresh Kumar 	 * verify the cpu speed can be set within this limit, which might be
2271bb176f7dSViresh Kumar 	 * different to the first one
2272bb176f7dSViresh Kumar 	 */
22733a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
2274e041c683SAlan Stern 	if (ret)
2275d9a789c7SRafael J. Wysocki 		return ret;
22761da177e4SLinus Torvalds 
22771da177e4SLinus Torvalds 	/* notification of the new policy */
2278e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22793a3e9e06SViresh Kumar 			CPUFREQ_NOTIFY, new_policy);
22801da177e4SLinus Torvalds 
22813a3e9e06SViresh Kumar 	policy->min = new_policy->min;
22823a3e9e06SViresh Kumar 	policy->max = new_policy->max;
22831da177e4SLinus Torvalds 
22842d06d8c4SDominik Brodowski 	pr_debug("new min and max freqs are %u - %u kHz\n",
22853a3e9e06SViresh Kumar 		 policy->min, policy->max);
22861da177e4SLinus Torvalds 
22871c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
22883a3e9e06SViresh Kumar 		policy->policy = new_policy->policy;
22892d06d8c4SDominik Brodowski 		pr_debug("setting range\n");
2290d9a789c7SRafael J. Wysocki 		return cpufreq_driver->setpolicy(new_policy);
2291d9a789c7SRafael J. Wysocki 	}
2292d9a789c7SRafael J. Wysocki 
2293d9a789c7SRafael J. Wysocki 	if (new_policy->governor == policy->governor)
2294d9a789c7SRafael J. Wysocki 		goto out;
22951da177e4SLinus Torvalds 
22962d06d8c4SDominik Brodowski 	pr_debug("governor switch\n");
22971da177e4SLinus Torvalds 
2298d9a789c7SRafael J. Wysocki 	/* save old, working values */
2299d9a789c7SRafael J. Wysocki 	old_gov = policy->governor;
23001da177e4SLinus Torvalds 	/* end old governor */
2301d9a789c7SRafael J. Wysocki 	if (old_gov) {
23023a3e9e06SViresh Kumar 		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2303ad7722daSviresh kumar 		up_write(&policy->rwsem);
2304d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2305ad7722daSviresh kumar 		down_write(&policy->rwsem);
23067bd353a9SViresh Kumar 	}
23071da177e4SLinus Torvalds 
23081da177e4SLinus Torvalds 	/* start new governor */
23093a3e9e06SViresh Kumar 	policy->governor = new_policy->governor;
23103a3e9e06SViresh Kumar 	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2311d9a789c7SRafael J. Wysocki 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2312d9a789c7SRafael J. Wysocki 			goto out;
2313d9a789c7SRafael J. Wysocki 
2314ad7722daSviresh kumar 		up_write(&policy->rwsem);
2315d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2316ad7722daSviresh kumar 		down_write(&policy->rwsem);
2317955ef483SViresh Kumar 	}
23187bd353a9SViresh Kumar 
23191da177e4SLinus Torvalds 	/* new governor failed, so re-start old one */
2320d9a789c7SRafael J. Wysocki 	pr_debug("starting governor %s failed\n", policy->governor->name);
23211da177e4SLinus Torvalds 	if (old_gov) {
23223a3e9e06SViresh Kumar 		policy->governor = old_gov;
2323d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2324d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_START);
23251da177e4SLinus Torvalds 	}
23261da177e4SLinus Torvalds 
2327d9a789c7SRafael J. Wysocki 	return -EINVAL;
2328d9a789c7SRafael J. Wysocki 
2329d9a789c7SRafael J. Wysocki  out:
2330d9a789c7SRafael J. Wysocki 	pr_debug("governor: change or update limits\n");
2331d9a789c7SRafael J. Wysocki 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
23321da177e4SLinus Torvalds }
23331da177e4SLinus Torvalds 
23341da177e4SLinus Torvalds /**
23351da177e4SLinus Torvalds  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
23361da177e4SLinus Torvalds  *	@cpu: CPU which shall be re-evaluated
23371da177e4SLinus Torvalds  *
233825985edcSLucas De Marchi  *	Useful for policy notifiers which have different necessities
23391da177e4SLinus Torvalds  *	at different times.
23401da177e4SLinus Torvalds  */
23411da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu)
23421da177e4SLinus Torvalds {
23433a3e9e06SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
23443a3e9e06SViresh Kumar 	struct cpufreq_policy new_policy;
2345f1829e4aSJulia Lawall 	int ret;
23461da177e4SLinus Torvalds 
2347fefa8ff8SAaron Plattner 	if (!policy)
2348fefa8ff8SAaron Plattner 		return -ENODEV;
23491da177e4SLinus Torvalds 
2350ad7722daSviresh kumar 	down_write(&policy->rwsem);
23511da177e4SLinus Torvalds 
23522d06d8c4SDominik Brodowski 	pr_debug("updating policy for CPU %u\n", cpu);
2353d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
23543a3e9e06SViresh Kumar 	new_policy.min = policy->user_policy.min;
23553a3e9e06SViresh Kumar 	new_policy.max = policy->user_policy.max;
23563a3e9e06SViresh Kumar 	new_policy.policy = policy->user_policy.policy;
23573a3e9e06SViresh Kumar 	new_policy.governor = policy->user_policy.governor;
23581da177e4SLinus Torvalds 
2359bb176f7dSViresh Kumar 	/*
2360bb176f7dSViresh Kumar 	 * BIOS might change freq behind our back
2361bb176f7dSViresh Kumar 	 * -> ask driver for current freq and notify governors about a change
2362bb176f7dSViresh Kumar 	 */
23632ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
23643a3e9e06SViresh Kumar 		new_policy.cur = cpufreq_driver->get(cpu);
2365bd0fa9bbSViresh Kumar 		if (WARN_ON(!new_policy.cur)) {
2366bd0fa9bbSViresh Kumar 			ret = -EIO;
2367fefa8ff8SAaron Plattner 			goto unlock;
2368bd0fa9bbSViresh Kumar 		}
2369bd0fa9bbSViresh Kumar 
23703a3e9e06SViresh Kumar 		if (!policy->cur) {
2371e837f9b5SJoe Perches 			pr_debug("Driver did not initialize current freq\n");
23723a3e9e06SViresh Kumar 			policy->cur = new_policy.cur;
2373a85f7bd3SThomas Renninger 		} else {
23749c0ebcf7SViresh Kumar 			if (policy->cur != new_policy.cur && has_target())
2375a1e1dc41SViresh Kumar 				cpufreq_out_of_sync(policy, new_policy.cur);
23760961dd0dSThomas Renninger 		}
2377a85f7bd3SThomas Renninger 	}
23780961dd0dSThomas Renninger 
2379037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
23801da177e4SLinus Torvalds 
2381fefa8ff8SAaron Plattner unlock:
2382ad7722daSviresh kumar 	up_write(&policy->rwsem);
23835a01f2e8SVenkatesh Pallipadi 
23843a3e9e06SViresh Kumar 	cpufreq_cpu_put(policy);
23851da177e4SLinus Torvalds 	return ret;
23861da177e4SLinus Torvalds }
23871da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy);
23881da177e4SLinus Torvalds 
23892760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb,
2390c32b6b8eSAshok Raj 					unsigned long action, void *hcpu)
2391c32b6b8eSAshok Raj {
2392c32b6b8eSAshok Raj 	unsigned int cpu = (unsigned long)hcpu;
23938a25a2fdSKay Sievers 	struct device *dev;
2394c32b6b8eSAshok Raj 
23958a25a2fdSKay Sievers 	dev = get_cpu_device(cpu);
23968a25a2fdSKay Sievers 	if (dev) {
23975302c3fbSSrivatsa S. Bhat 		switch (action & ~CPU_TASKS_FROZEN) {
2398c32b6b8eSAshok Raj 		case CPU_ONLINE:
239923faf0b7SViresh Kumar 			cpufreq_add_dev(dev, NULL);
2400c32b6b8eSAshok Raj 			break;
24015302c3fbSSrivatsa S. Bhat 
2402c32b6b8eSAshok Raj 		case CPU_DOWN_PREPARE:
240396bbbe4aSViresh Kumar 			__cpufreq_remove_dev_prepare(dev, NULL);
24041aee40acSSrivatsa S. Bhat 			break;
24051aee40acSSrivatsa S. Bhat 
24061aee40acSSrivatsa S. Bhat 		case CPU_POST_DEAD:
240796bbbe4aSViresh Kumar 			__cpufreq_remove_dev_finish(dev, NULL);
2408c32b6b8eSAshok Raj 			break;
24095302c3fbSSrivatsa S. Bhat 
24105a01f2e8SVenkatesh Pallipadi 		case CPU_DOWN_FAILED:
241123faf0b7SViresh Kumar 			cpufreq_add_dev(dev, NULL);
2412c32b6b8eSAshok Raj 			break;
2413c32b6b8eSAshok Raj 		}
2414c32b6b8eSAshok Raj 	}
2415c32b6b8eSAshok Raj 	return NOTIFY_OK;
2416c32b6b8eSAshok Raj }
2417c32b6b8eSAshok Raj 
24189c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = {
2419c32b6b8eSAshok Raj 	.notifier_call = cpufreq_cpu_callback,
2420c32b6b8eSAshok Raj };
24211da177e4SLinus Torvalds 
24221da177e4SLinus Torvalds /*********************************************************************
24236f19efc0SLukasz Majewski  *               BOOST						     *
24246f19efc0SLukasz Majewski  *********************************************************************/
24256f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state)
24266f19efc0SLukasz Majewski {
24276f19efc0SLukasz Majewski 	struct cpufreq_frequency_table *freq_table;
24286f19efc0SLukasz Majewski 	struct cpufreq_policy *policy;
24296f19efc0SLukasz Majewski 	int ret = -EINVAL;
24306f19efc0SLukasz Majewski 
2431f963735aSViresh Kumar 	for_each_active_policy(policy) {
24326f19efc0SLukasz Majewski 		freq_table = cpufreq_frequency_get_table(policy->cpu);
24336f19efc0SLukasz Majewski 		if (freq_table) {
24346f19efc0SLukasz Majewski 			ret = cpufreq_frequency_table_cpuinfo(policy,
24356f19efc0SLukasz Majewski 							freq_table);
24366f19efc0SLukasz Majewski 			if (ret) {
24376f19efc0SLukasz Majewski 				pr_err("%s: Policy frequency update failed\n",
24386f19efc0SLukasz Majewski 				       __func__);
24396f19efc0SLukasz Majewski 				break;
24406f19efc0SLukasz Majewski 			}
24416f19efc0SLukasz Majewski 			policy->user_policy.max = policy->max;
24426f19efc0SLukasz Majewski 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
24436f19efc0SLukasz Majewski 		}
24446f19efc0SLukasz Majewski 	}
24456f19efc0SLukasz Majewski 
24466f19efc0SLukasz Majewski 	return ret;
24476f19efc0SLukasz Majewski }
24486f19efc0SLukasz Majewski 
24496f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state)
24506f19efc0SLukasz Majewski {
24516f19efc0SLukasz Majewski 	unsigned long flags;
24526f19efc0SLukasz Majewski 	int ret = 0;
24536f19efc0SLukasz Majewski 
24546f19efc0SLukasz Majewski 	if (cpufreq_driver->boost_enabled == state)
24556f19efc0SLukasz Majewski 		return 0;
24566f19efc0SLukasz Majewski 
24576f19efc0SLukasz Majewski 	write_lock_irqsave(&cpufreq_driver_lock, flags);
24586f19efc0SLukasz Majewski 	cpufreq_driver->boost_enabled = state;
24596f19efc0SLukasz Majewski 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24606f19efc0SLukasz Majewski 
24616f19efc0SLukasz Majewski 	ret = cpufreq_driver->set_boost(state);
24626f19efc0SLukasz Majewski 	if (ret) {
24636f19efc0SLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
24646f19efc0SLukasz Majewski 		cpufreq_driver->boost_enabled = !state;
24656f19efc0SLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24666f19efc0SLukasz Majewski 
2467e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST\n",
2468e837f9b5SJoe Perches 		       __func__, state ? "enable" : "disable");
24696f19efc0SLukasz Majewski 	}
24706f19efc0SLukasz Majewski 
24716f19efc0SLukasz Majewski 	return ret;
24726f19efc0SLukasz Majewski }
24736f19efc0SLukasz Majewski 
24746f19efc0SLukasz Majewski int cpufreq_boost_supported(void)
24756f19efc0SLukasz Majewski {
24766f19efc0SLukasz Majewski 	if (likely(cpufreq_driver))
24776f19efc0SLukasz Majewski 		return cpufreq_driver->boost_supported;
24786f19efc0SLukasz Majewski 
24796f19efc0SLukasz Majewski 	return 0;
24806f19efc0SLukasz Majewski }
24816f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
24826f19efc0SLukasz Majewski 
24836f19efc0SLukasz Majewski int cpufreq_boost_enabled(void)
24846f19efc0SLukasz Majewski {
24856f19efc0SLukasz Majewski 	return cpufreq_driver->boost_enabled;
24866f19efc0SLukasz Majewski }
24876f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
24886f19efc0SLukasz Majewski 
24896f19efc0SLukasz Majewski /*********************************************************************
24901da177e4SLinus Torvalds  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
24911da177e4SLinus Torvalds  *********************************************************************/
24921da177e4SLinus Torvalds 
24931da177e4SLinus Torvalds /**
24941da177e4SLinus Torvalds  * cpufreq_register_driver - register a CPU Frequency driver
24951da177e4SLinus Torvalds  * @driver_data: A struct cpufreq_driver containing the values#
24961da177e4SLinus Torvalds  * submitted by the CPU Frequency driver.
24971da177e4SLinus Torvalds  *
24981da177e4SLinus Torvalds  * Registers a CPU Frequency driver to this core code. This code
24991da177e4SLinus Torvalds  * returns zero on success, -EBUSY when another driver got here first
25001da177e4SLinus Torvalds  * (and isn't unregistered in the meantime).
25011da177e4SLinus Torvalds  *
25021da177e4SLinus Torvalds  */
2503221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data)
25041da177e4SLinus Torvalds {
25051da177e4SLinus Torvalds 	unsigned long flags;
25061da177e4SLinus Torvalds 	int ret;
25071da177e4SLinus Torvalds 
2508a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2509a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2510a7b422cdSKonrad Rzeszutek Wilk 
25111da177e4SLinus Torvalds 	if (!driver_data || !driver_data->verify || !driver_data->init ||
25129c0ebcf7SViresh Kumar 	    !(driver_data->setpolicy || driver_data->target_index ||
25139832235fSRafael J. Wysocki 		    driver_data->target) ||
25149832235fSRafael J. Wysocki 	     (driver_data->setpolicy && (driver_data->target_index ||
25151c03a2d0SViresh Kumar 		    driver_data->target)) ||
25161c03a2d0SViresh Kumar 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
25171da177e4SLinus Torvalds 		return -EINVAL;
25181da177e4SLinus Torvalds 
25192d06d8c4SDominik Brodowski 	pr_debug("trying to register driver %s\n", driver_data->name);
25201da177e4SLinus Torvalds 
25210d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25221c3d85ddSRafael J. Wysocki 	if (cpufreq_driver) {
25230d1857a1SNathan Zimmer 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25244dea5806SYinghai Lu 		return -EEXIST;
25251da177e4SLinus Torvalds 	}
25261c3d85ddSRafael J. Wysocki 	cpufreq_driver = driver_data;
25270d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25281da177e4SLinus Torvalds 
2529bc68b7dfSViresh Kumar 	if (driver_data->setpolicy)
2530bc68b7dfSViresh Kumar 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2531bc68b7dfSViresh Kumar 
25326f19efc0SLukasz Majewski 	if (cpufreq_boost_supported()) {
25336f19efc0SLukasz Majewski 		/*
25346f19efc0SLukasz Majewski 		 * Check if driver provides function to enable boost -
25356f19efc0SLukasz Majewski 		 * if not, use cpufreq_boost_set_sw as default
25366f19efc0SLukasz Majewski 		 */
25376f19efc0SLukasz Majewski 		if (!cpufreq_driver->set_boost)
25386f19efc0SLukasz Majewski 			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
25396f19efc0SLukasz Majewski 
25406f19efc0SLukasz Majewski 		ret = cpufreq_sysfs_create_file(&boost.attr);
25416f19efc0SLukasz Majewski 		if (ret) {
25426f19efc0SLukasz Majewski 			pr_err("%s: cannot register global BOOST sysfs file\n",
25436f19efc0SLukasz Majewski 			       __func__);
25446f19efc0SLukasz Majewski 			goto err_null_driver;
25456f19efc0SLukasz Majewski 		}
25466f19efc0SLukasz Majewski 	}
25476f19efc0SLukasz Majewski 
25488a25a2fdSKay Sievers 	ret = subsys_interface_register(&cpufreq_interface);
25498f5bc2abSJiri Slaby 	if (ret)
25506f19efc0SLukasz Majewski 		goto err_boost_unreg;
25511da177e4SLinus Torvalds 
2552ce1bcfe9SViresh Kumar 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2553ce1bcfe9SViresh Kumar 	    list_empty(&cpufreq_policy_list)) {
25541da177e4SLinus Torvalds 		/* if all ->init() calls failed, unregister */
2555ce1bcfe9SViresh Kumar 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2556e08f5f5bSGautham R Shenoy 			 driver_data->name);
25578a25a2fdSKay Sievers 		goto err_if_unreg;
25581da177e4SLinus Torvalds 	}
25591da177e4SLinus Torvalds 
256065edc68cSChandra Seetharaman 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
25612d06d8c4SDominik Brodowski 	pr_debug("driver %s up and running\n", driver_data->name);
25621da177e4SLinus Torvalds 
25638f5bc2abSJiri Slaby 	return 0;
25648a25a2fdSKay Sievers err_if_unreg:
25658a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25666f19efc0SLukasz Majewski err_boost_unreg:
25676f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25686f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25698f5bc2abSJiri Slaby err_null_driver:
25700d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25711c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25720d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25734d34a67dSDave Jones 	return ret;
25741da177e4SLinus Torvalds }
25751da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver);
25761da177e4SLinus Torvalds 
25771da177e4SLinus Torvalds /**
25781da177e4SLinus Torvalds  * cpufreq_unregister_driver - unregister the current CPUFreq driver
25791da177e4SLinus Torvalds  *
25801da177e4SLinus Torvalds  * Unregister the current CPUFreq driver. Only call this if you have
25811da177e4SLinus Torvalds  * the right to do so, i.e. if you have succeeded in initialising before!
25821da177e4SLinus Torvalds  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
25831da177e4SLinus Torvalds  * currently not initialised.
25841da177e4SLinus Torvalds  */
2585221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver)
25861da177e4SLinus Torvalds {
25871da177e4SLinus Torvalds 	unsigned long flags;
25881da177e4SLinus Torvalds 
25891c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver || (driver != cpufreq_driver))
25901da177e4SLinus Torvalds 		return -EINVAL;
25911da177e4SLinus Torvalds 
25922d06d8c4SDominik Brodowski 	pr_debug("unregistering driver %s\n", driver->name);
25931da177e4SLinus Torvalds 
25948a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25956f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25966f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25976f19efc0SLukasz Majewski 
259865edc68cSChandra Seetharaman 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
25991da177e4SLinus Torvalds 
26006eed9404SViresh Kumar 	down_write(&cpufreq_rwsem);
26010d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
26026eed9404SViresh Kumar 
26031c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
26046eed9404SViresh Kumar 
26050d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
26066eed9404SViresh Kumar 	up_write(&cpufreq_rwsem);
26071da177e4SLinus Torvalds 
26081da177e4SLinus Torvalds 	return 0;
26091da177e4SLinus Torvalds }
26101da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
26115a01f2e8SVenkatesh Pallipadi 
261290de2a4aSDoug Anderson /*
261390de2a4aSDoug Anderson  * Stop cpufreq at shutdown to make sure it isn't holding any locks
261490de2a4aSDoug Anderson  * or mutexes when secondary CPUs are halted.
261590de2a4aSDoug Anderson  */
261690de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = {
261790de2a4aSDoug Anderson 	.shutdown = cpufreq_suspend,
261890de2a4aSDoug Anderson };
261990de2a4aSDoug Anderson 
26205a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void)
26215a01f2e8SVenkatesh Pallipadi {
2622a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2623a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2624a7b422cdSKonrad Rzeszutek Wilk 
26252361be23SViresh Kumar 	cpufreq_global_kobject = kobject_create();
26268aa84ad8SThomas Renninger 	BUG_ON(!cpufreq_global_kobject);
26278aa84ad8SThomas Renninger 
262890de2a4aSDoug Anderson 	register_syscore_ops(&cpufreq_syscore_ops);
262990de2a4aSDoug Anderson 
26305a01f2e8SVenkatesh Pallipadi 	return 0;
26315a01f2e8SVenkatesh Pallipadi }
26325a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init);
2633