xref: /openbmc/linux/drivers/cpufreq/cpufreq.c (revision 8101f99703048ceaa31c756abe1098d099249ad9)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/drivers/cpufreq/cpufreq.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 2001 Russell King
51da177e4SLinus Torvalds  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6bb176f7dSViresh Kumar  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
71da177e4SLinus Torvalds  *
8c32b6b8eSAshok Raj  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9c32b6b8eSAshok Raj  *	Added handling for CPU hotplug
108ff69732SDave Jones  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
118ff69732SDave Jones  *	Fix handling for CPU hotplug -- affected CPUs
12c32b6b8eSAshok Raj  *
131da177e4SLinus Torvalds  * This program is free software; you can redistribute it and/or modify
141da177e4SLinus Torvalds  * it under the terms of the GNU General Public License version 2 as
151da177e4SLinus Torvalds  * published by the Free Software Foundation.
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19db701151SViresh Kumar 
205ff0a268SViresh Kumar #include <linux/cpu.h>
211da177e4SLinus Torvalds #include <linux/cpufreq.h>
221da177e4SLinus Torvalds #include <linux/delay.h>
231da177e4SLinus Torvalds #include <linux/device.h>
245ff0a268SViresh Kumar #include <linux/init.h>
255ff0a268SViresh Kumar #include <linux/kernel_stat.h>
265ff0a268SViresh Kumar #include <linux/module.h>
273fc54d37Sakpm@osdl.org #include <linux/mutex.h>
285ff0a268SViresh Kumar #include <linux/slab.h>
292f0aea93SViresh Kumar #include <linux/suspend.h>
3090de2a4aSDoug Anderson #include <linux/syscore_ops.h>
315ff0a268SViresh Kumar #include <linux/tick.h>
326f4f2723SThomas Renninger #include <trace/events/power.h>
336f4f2723SThomas Renninger 
34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list);
35f963735aSViresh Kumar 
36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37f963735aSViresh Kumar {
38f963735aSViresh Kumar 	return cpumask_empty(policy->cpus);
39f963735aSViresh Kumar }
40f963735aSViresh Kumar 
41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42f963735aSViresh Kumar {
43f963735aSViresh Kumar 	return active == !policy_is_inactive(policy);
44f963735aSViresh Kumar }
45f963735aSViresh Kumar 
46f963735aSViresh Kumar /* Finds Next Acive/Inactive policy */
47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48f963735aSViresh Kumar 					  bool active)
49f963735aSViresh Kumar {
50f963735aSViresh Kumar 	do {
51f963735aSViresh Kumar 		policy = list_next_entry(policy, policy_list);
52f963735aSViresh Kumar 
53f963735aSViresh Kumar 		/* No more policies in the list */
54f963735aSViresh Kumar 		if (&policy->policy_list == &cpufreq_policy_list)
55f963735aSViresh Kumar 			return NULL;
56f963735aSViresh Kumar 	} while (!suitable_policy(policy, active));
57f963735aSViresh Kumar 
58f963735aSViresh Kumar 	return policy;
59f963735aSViresh Kumar }
60f963735aSViresh Kumar 
61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active)
62f963735aSViresh Kumar {
63f963735aSViresh Kumar 	struct cpufreq_policy *policy;
64f963735aSViresh Kumar 
65f963735aSViresh Kumar 	/* No policies in the list */
66f963735aSViresh Kumar 	if (list_empty(&cpufreq_policy_list))
67f963735aSViresh Kumar 		return NULL;
68f963735aSViresh Kumar 
69f963735aSViresh Kumar 	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70f963735aSViresh Kumar 				  policy_list);
71f963735aSViresh Kumar 
72f963735aSViresh Kumar 	if (!suitable_policy(policy, active))
73f963735aSViresh Kumar 		policy = next_policy(policy, active);
74f963735aSViresh Kumar 
75f963735aSViresh Kumar 	return policy;
76f963735aSViresh Kumar }
77f963735aSViresh Kumar 
78f963735aSViresh Kumar /* Macros to iterate over CPU policies */
79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active)	\
80f963735aSViresh Kumar 	for (__policy = first_policy(__active);		\
81f963735aSViresh Kumar 	     __policy;					\
82f963735aSViresh Kumar 	     __policy = next_policy(__policy, __active))
83f963735aSViresh Kumar 
84f963735aSViresh Kumar #define for_each_active_policy(__policy)		\
85f963735aSViresh Kumar 	for_each_suitable_policy(__policy, true)
86f963735aSViresh Kumar #define for_each_inactive_policy(__policy)		\
87f963735aSViresh Kumar 	for_each_suitable_policy(__policy, false)
88f963735aSViresh Kumar 
89b4f0676fSViresh Kumar #define for_each_policy(__policy)			\
90b4f0676fSViresh Kumar 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91b4f0676fSViresh Kumar 
92f7b27061SViresh Kumar /* Iterate over governors */
93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list);
94f7b27061SViresh Kumar #define for_each_governor(__governor)				\
95f7b27061SViresh Kumar 	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96f7b27061SViresh Kumar 
971da177e4SLinus Torvalds /**
98cd878479SDave Jones  * The "cpufreq driver" - the arch- or hardware-dependent low
991da177e4SLinus Torvalds  * level driver of CPUFreq support, and its spinlock. This lock
1001da177e4SLinus Torvalds  * also protects the cpufreq_cpu_data array.
1011da177e4SLinus Torvalds  */
1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver;
1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock);
1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock);
106bb176f7dSViresh Kumar 
1072f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */
1082f0aea93SViresh Kumar static bool cpufreq_suspended;
1091da177e4SLinus Torvalds 
1109c0ebcf7SViresh Kumar static inline bool has_target(void)
1119c0ebcf7SViresh Kumar {
1129c0ebcf7SViresh Kumar 	return cpufreq_driver->target_index || cpufreq_driver->target;
1139c0ebcf7SViresh Kumar }
1149c0ebcf7SViresh Kumar 
1155a01f2e8SVenkatesh Pallipadi /*
1166eed9404SViresh Kumar  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
1176eed9404SViresh Kumar  * sections
1186eed9404SViresh Kumar  */
1196eed9404SViresh Kumar static DECLARE_RWSEM(cpufreq_rwsem);
1206eed9404SViresh Kumar 
1211da177e4SLinus Torvalds /* internal prototypes */
12229464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy,
12329464f28SDave Jones 		unsigned int event);
124d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
12565f27f38SDavid Howells static void handle_update(struct work_struct *work);
1261da177e4SLinus Torvalds 
1271da177e4SLinus Torvalds /**
1281da177e4SLinus Torvalds  * Two notifier lists: the "policy" list is involved in the
1291da177e4SLinus Torvalds  * validation process for a new CPU frequency policy; the
1301da177e4SLinus Torvalds  * "transition" list for kernel code that needs to handle
1311da177e4SLinus Torvalds  * changes to devices when the CPU clock speed changes.
1321da177e4SLinus Torvalds  * The mutex locks both lists.
1331da177e4SLinus Torvalds  */
134e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
135b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list;
1361da177e4SLinus Torvalds 
13774212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called;
138b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void)
139b4dfdbb3SAlan Stern {
140b4dfdbb3SAlan Stern 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
14174212ca4SCesar Eduardo Barros 	init_cpufreq_transition_notifier_list_called = true;
142b4dfdbb3SAlan Stern 	return 0;
143b4dfdbb3SAlan Stern }
144b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list);
1451da177e4SLinus Torvalds 
146a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly;
147da584455SViresh Kumar static int cpufreq_disabled(void)
148a7b422cdSKonrad Rzeszutek Wilk {
149a7b422cdSKonrad Rzeszutek Wilk 	return off;
150a7b422cdSKonrad Rzeszutek Wilk }
151a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void)
152a7b422cdSKonrad Rzeszutek Wilk {
153a7b422cdSKonrad Rzeszutek Wilk 	off = 1;
154a7b422cdSKonrad Rzeszutek Wilk }
1553fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex);
1561da177e4SLinus Torvalds 
1574d5dcc42SViresh Kumar bool have_governor_per_policy(void)
1584d5dcc42SViresh Kumar {
1590b981e70SViresh Kumar 	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
1604d5dcc42SViresh Kumar }
1613f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy);
1624d5dcc42SViresh Kumar 
163944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164944e9a03SViresh Kumar {
165944e9a03SViresh Kumar 	if (have_governor_per_policy())
166944e9a03SViresh Kumar 		return &policy->kobj;
167944e9a03SViresh Kumar 	else
168944e9a03SViresh Kumar 		return cpufreq_global_kobject;
169944e9a03SViresh Kumar }
170944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171944e9a03SViresh Kumar 
1725a31d594SViresh Kumar struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
1735a31d594SViresh Kumar {
1745a31d594SViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1755a31d594SViresh Kumar 
1765a31d594SViresh Kumar 	return policy && !policy_is_inactive(policy) ?
1775a31d594SViresh Kumar 		policy->freq_table : NULL;
1785a31d594SViresh Kumar }
1795a31d594SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
1805a31d594SViresh Kumar 
18172a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
18272a4ce34SViresh Kumar {
18372a4ce34SViresh Kumar 	u64 idle_time;
18472a4ce34SViresh Kumar 	u64 cur_wall_time;
18572a4ce34SViresh Kumar 	u64 busy_time;
18672a4ce34SViresh Kumar 
18772a4ce34SViresh Kumar 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
18872a4ce34SViresh Kumar 
18972a4ce34SViresh Kumar 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
19072a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
19172a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
19272a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
19372a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
19472a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
19572a4ce34SViresh Kumar 
19672a4ce34SViresh Kumar 	idle_time = cur_wall_time - busy_time;
19772a4ce34SViresh Kumar 	if (wall)
19872a4ce34SViresh Kumar 		*wall = cputime_to_usecs(cur_wall_time);
19972a4ce34SViresh Kumar 
20072a4ce34SViresh Kumar 	return cputime_to_usecs(idle_time);
20172a4ce34SViresh Kumar }
20272a4ce34SViresh Kumar 
20372a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
20472a4ce34SViresh Kumar {
20572a4ce34SViresh Kumar 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
20672a4ce34SViresh Kumar 
20772a4ce34SViresh Kumar 	if (idle_time == -1ULL)
20872a4ce34SViresh Kumar 		return get_cpu_idle_time_jiffy(cpu, wall);
20972a4ce34SViresh Kumar 	else if (!io_busy)
21072a4ce34SViresh Kumar 		idle_time += get_cpu_iowait_time_us(cpu, wall);
21172a4ce34SViresh Kumar 
21272a4ce34SViresh Kumar 	return idle_time;
21372a4ce34SViresh Kumar }
21472a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time);
21572a4ce34SViresh Kumar 
21670e9e778SViresh Kumar /*
21770e9e778SViresh Kumar  * This is a generic cpufreq init() routine which can be used by cpufreq
21870e9e778SViresh Kumar  * drivers of SMP systems. It will do following:
21970e9e778SViresh Kumar  * - validate & show freq table passed
22070e9e778SViresh Kumar  * - set policies transition latency
22170e9e778SViresh Kumar  * - policy->cpus with all possible CPUs
22270e9e778SViresh Kumar  */
22370e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy,
22470e9e778SViresh Kumar 		struct cpufreq_frequency_table *table,
22570e9e778SViresh Kumar 		unsigned int transition_latency)
22670e9e778SViresh Kumar {
22770e9e778SViresh Kumar 	int ret;
22870e9e778SViresh Kumar 
22970e9e778SViresh Kumar 	ret = cpufreq_table_validate_and_show(policy, table);
23070e9e778SViresh Kumar 	if (ret) {
23170e9e778SViresh Kumar 		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
23270e9e778SViresh Kumar 		return ret;
23370e9e778SViresh Kumar 	}
23470e9e778SViresh Kumar 
23570e9e778SViresh Kumar 	policy->cpuinfo.transition_latency = transition_latency;
23670e9e778SViresh Kumar 
23770e9e778SViresh Kumar 	/*
23858405af6SShailendra Verma 	 * The driver only supports the SMP configuration where all processors
23970e9e778SViresh Kumar 	 * share the clock and voltage and clock.
24070e9e778SViresh Kumar 	 */
24170e9e778SViresh Kumar 	cpumask_setall(policy->cpus);
24270e9e778SViresh Kumar 
24370e9e778SViresh Kumar 	return 0;
24470e9e778SViresh Kumar }
24570e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init);
24670e9e778SViresh Kumar 
247988bed09SViresh Kumar /* Only for cpufreq core internal use */
248988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
249652ed95dSViresh Kumar {
250652ed95dSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
251652ed95dSViresh Kumar 
252988bed09SViresh Kumar 	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
253988bed09SViresh Kumar }
254988bed09SViresh Kumar 
255988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu)
256988bed09SViresh Kumar {
257988bed09SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
258988bed09SViresh Kumar 
259652ed95dSViresh Kumar 	if (!policy || IS_ERR(policy->clk)) {
260e837f9b5SJoe Perches 		pr_err("%s: No %s associated to cpu: %d\n",
261e837f9b5SJoe Perches 		       __func__, policy ? "clk" : "policy", cpu);
262652ed95dSViresh Kumar 		return 0;
263652ed95dSViresh Kumar 	}
264652ed95dSViresh Kumar 
265652ed95dSViresh Kumar 	return clk_get_rate(policy->clk) / 1000;
266652ed95dSViresh Kumar }
267652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get);
268652ed95dSViresh Kumar 
26950e9c852SViresh Kumar /**
27050e9c852SViresh Kumar  * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
27150e9c852SViresh Kumar  *
27250e9c852SViresh Kumar  * @cpu: cpu to find policy for.
27350e9c852SViresh Kumar  *
27450e9c852SViresh Kumar  * This returns policy for 'cpu', returns NULL if it doesn't exist.
27550e9c852SViresh Kumar  * It also increments the kobject reference count to mark it busy and so would
27650e9c852SViresh Kumar  * require a corresponding call to cpufreq_cpu_put() to decrement it back.
27750e9c852SViresh Kumar  * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
27850e9c852SViresh Kumar  * freed as that depends on the kobj count.
27950e9c852SViresh Kumar  *
28050e9c852SViresh Kumar  * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
28150e9c852SViresh Kumar  * valid policy is found. This is done to make sure the driver doesn't get
28250e9c852SViresh Kumar  * unregistered while the policy is being used.
28350e9c852SViresh Kumar  *
28450e9c852SViresh Kumar  * Return: A valid policy on success, otherwise NULL on failure.
28550e9c852SViresh Kumar  */
2866eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
2871da177e4SLinus Torvalds {
2886eed9404SViresh Kumar 	struct cpufreq_policy *policy = NULL;
2891da177e4SLinus Torvalds 	unsigned long flags;
2901da177e4SLinus Torvalds 
2911b947c90SViresh Kumar 	if (WARN_ON(cpu >= nr_cpu_ids))
2926eed9404SViresh Kumar 		return NULL;
2936eed9404SViresh Kumar 
2946eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
2956eed9404SViresh Kumar 		return NULL;
2961da177e4SLinus Torvalds 
2971da177e4SLinus Torvalds 	/* get the cpufreq driver */
2980d1857a1SNathan Zimmer 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2991da177e4SLinus Torvalds 
3006eed9404SViresh Kumar 	if (cpufreq_driver) {
3011da177e4SLinus Torvalds 		/* get the CPU */
302988bed09SViresh Kumar 		policy = cpufreq_cpu_get_raw(cpu);
3036eed9404SViresh Kumar 		if (policy)
3046eed9404SViresh Kumar 			kobject_get(&policy->kobj);
3056eed9404SViresh Kumar 	}
3066eed9404SViresh Kumar 
3076eed9404SViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
3081da177e4SLinus Torvalds 
3093a3e9e06SViresh Kumar 	if (!policy)
3106eed9404SViresh Kumar 		up_read(&cpufreq_rwsem);
3111da177e4SLinus Torvalds 
3123a3e9e06SViresh Kumar 	return policy;
313a9144436SStephen Boyd }
3141da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
3151da177e4SLinus Torvalds 
31650e9c852SViresh Kumar /**
31750e9c852SViresh Kumar  * cpufreq_cpu_put: Decrements the usage count of a policy
31850e9c852SViresh Kumar  *
31950e9c852SViresh Kumar  * @policy: policy earlier returned by cpufreq_cpu_get().
32050e9c852SViresh Kumar  *
32150e9c852SViresh Kumar  * This decrements the kobject reference count incremented earlier by calling
32250e9c852SViresh Kumar  * cpufreq_cpu_get().
32350e9c852SViresh Kumar  *
32450e9c852SViresh Kumar  * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
32550e9c852SViresh Kumar  */
3263a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy)
327a9144436SStephen Boyd {
3286eed9404SViresh Kumar 	kobject_put(&policy->kobj);
3296eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
330a9144436SStephen Boyd }
3311da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
3321da177e4SLinus Torvalds 
3331da177e4SLinus Torvalds /*********************************************************************
3341da177e4SLinus Torvalds  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
3351da177e4SLinus Torvalds  *********************************************************************/
3361da177e4SLinus Torvalds 
3371da177e4SLinus Torvalds /**
3381da177e4SLinus Torvalds  * adjust_jiffies - adjust the system "loops_per_jiffy"
3391da177e4SLinus Torvalds  *
3401da177e4SLinus Torvalds  * This function alters the system "loops_per_jiffy" for the clock
3411da177e4SLinus Torvalds  * speed change. Note that loops_per_jiffy cannot be updated on SMP
3421da177e4SLinus Torvalds  * systems as each CPU might be scaled differently. So, use the arch
3431da177e4SLinus Torvalds  * per-CPU loops_per_jiffy value wherever possible.
3441da177e4SLinus Torvalds  */
34539c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
34639c132eeSViresh Kumar {
3471da177e4SLinus Torvalds #ifndef CONFIG_SMP
3481da177e4SLinus Torvalds 	static unsigned long l_p_j_ref;
3491da177e4SLinus Torvalds 	static unsigned int l_p_j_ref_freq;
3501da177e4SLinus Torvalds 
3511da177e4SLinus Torvalds 	if (ci->flags & CPUFREQ_CONST_LOOPS)
3521da177e4SLinus Torvalds 		return;
3531da177e4SLinus Torvalds 
3541da177e4SLinus Torvalds 	if (!l_p_j_ref_freq) {
3551da177e4SLinus Torvalds 		l_p_j_ref = loops_per_jiffy;
3561da177e4SLinus Torvalds 		l_p_j_ref_freq = ci->old;
357e837f9b5SJoe Perches 		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
358e837f9b5SJoe Perches 			 l_p_j_ref, l_p_j_ref_freq);
3591da177e4SLinus Torvalds 	}
3600b443eadSViresh Kumar 	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
361e08f5f5bSGautham R Shenoy 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
362e08f5f5bSGautham R Shenoy 								ci->new);
363e837f9b5SJoe Perches 		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
364e837f9b5SJoe Perches 			 loops_per_jiffy, ci->new);
3651da177e4SLinus Torvalds 	}
3661da177e4SLinus Torvalds #endif
36739c132eeSViresh Kumar }
3681da177e4SLinus Torvalds 
3690956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
370b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
3711da177e4SLinus Torvalds {
3721da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
3731da177e4SLinus Torvalds 
374d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
375d5aaffa9SDirk Brandewie 		return;
376d5aaffa9SDirk Brandewie 
3771c3d85ddSRafael J. Wysocki 	freqs->flags = cpufreq_driver->flags;
3782d06d8c4SDominik Brodowski 	pr_debug("notification %u of frequency transition to %u kHz\n",
379e4472cb3SDave Jones 		 state, freqs->new);
3801da177e4SLinus Torvalds 
3811da177e4SLinus Torvalds 	switch (state) {
382e4472cb3SDave Jones 
3831da177e4SLinus Torvalds 	case CPUFREQ_PRECHANGE:
384e4472cb3SDave Jones 		/* detect if the driver reported a value as "old frequency"
385e4472cb3SDave Jones 		 * which is not equal to what the cpufreq core thinks is
386e4472cb3SDave Jones 		 * "old frequency".
3871da177e4SLinus Torvalds 		 */
3881c3d85ddSRafael J. Wysocki 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
389e4472cb3SDave Jones 			if ((policy) && (policy->cpu == freqs->cpu) &&
390e4472cb3SDave Jones 			    (policy->cur) && (policy->cur != freqs->old)) {
391e837f9b5SJoe Perches 				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
392e4472cb3SDave Jones 					 freqs->old, policy->cur);
393e4472cb3SDave Jones 				freqs->old = policy->cur;
3941da177e4SLinus Torvalds 			}
3951da177e4SLinus Torvalds 		}
396b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
397e4472cb3SDave Jones 				CPUFREQ_PRECHANGE, freqs);
3981da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
3991da177e4SLinus Torvalds 		break;
400e4472cb3SDave Jones 
4011da177e4SLinus Torvalds 	case CPUFREQ_POSTCHANGE:
4021da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
403e837f9b5SJoe Perches 		pr_debug("FREQ: %lu - CPU: %lu\n",
404e837f9b5SJoe Perches 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
40525e41933SThomas Renninger 		trace_cpu_frequency(freqs->new, freqs->cpu);
406b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
407e4472cb3SDave Jones 				CPUFREQ_POSTCHANGE, freqs);
408e4472cb3SDave Jones 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
409e4472cb3SDave Jones 			policy->cur = freqs->new;
4101da177e4SLinus Torvalds 		break;
4111da177e4SLinus Torvalds 	}
4121da177e4SLinus Torvalds }
413bb176f7dSViresh Kumar 
414b43a7ffbSViresh Kumar /**
415b43a7ffbSViresh Kumar  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
416b43a7ffbSViresh Kumar  * on frequency transition.
417b43a7ffbSViresh Kumar  *
418b43a7ffbSViresh Kumar  * This function calls the transition notifiers and the "adjust_jiffies"
419b43a7ffbSViresh Kumar  * function. It is called twice on all CPU frequency changes that have
420b43a7ffbSViresh Kumar  * external effects.
421b43a7ffbSViresh Kumar  */
422236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy,
423b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
424b43a7ffbSViresh Kumar {
425b43a7ffbSViresh Kumar 	for_each_cpu(freqs->cpu, policy->cpus)
426b43a7ffbSViresh Kumar 		__cpufreq_notify_transition(policy, freqs, state);
427b43a7ffbSViresh Kumar }
4281da177e4SLinus Torvalds 
429f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */
430236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
431f7ba3b41SViresh Kumar 		struct cpufreq_freqs *freqs, int transition_failed)
432f7ba3b41SViresh Kumar {
433f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
434f7ba3b41SViresh Kumar 	if (!transition_failed)
435f7ba3b41SViresh Kumar 		return;
436f7ba3b41SViresh Kumar 
437f7ba3b41SViresh Kumar 	swap(freqs->old, freqs->new);
438f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
439f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
440f7ba3b41SViresh Kumar }
441f7ba3b41SViresh Kumar 
44212478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
44312478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs)
44412478cf0SSrivatsa S. Bhat {
445ca654dc3SSrivatsa S. Bhat 
446ca654dc3SSrivatsa S. Bhat 	/*
447ca654dc3SSrivatsa S. Bhat 	 * Catch double invocations of _begin() which lead to self-deadlock.
448ca654dc3SSrivatsa S. Bhat 	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
449ca654dc3SSrivatsa S. Bhat 	 * doesn't invoke _begin() on their behalf, and hence the chances of
450ca654dc3SSrivatsa S. Bhat 	 * double invocations are very low. Moreover, there are scenarios
451ca654dc3SSrivatsa S. Bhat 	 * where these checks can emit false-positive warnings in these
452ca654dc3SSrivatsa S. Bhat 	 * drivers; so we avoid that by skipping them altogether.
453ca654dc3SSrivatsa S. Bhat 	 */
454ca654dc3SSrivatsa S. Bhat 	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
455ca654dc3SSrivatsa S. Bhat 				&& current == policy->transition_task);
456ca654dc3SSrivatsa S. Bhat 
45712478cf0SSrivatsa S. Bhat wait:
45812478cf0SSrivatsa S. Bhat 	wait_event(policy->transition_wait, !policy->transition_ongoing);
45912478cf0SSrivatsa S. Bhat 
46012478cf0SSrivatsa S. Bhat 	spin_lock(&policy->transition_lock);
46112478cf0SSrivatsa S. Bhat 
46212478cf0SSrivatsa S. Bhat 	if (unlikely(policy->transition_ongoing)) {
46312478cf0SSrivatsa S. Bhat 		spin_unlock(&policy->transition_lock);
46412478cf0SSrivatsa S. Bhat 		goto wait;
46512478cf0SSrivatsa S. Bhat 	}
46612478cf0SSrivatsa S. Bhat 
46712478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = true;
468ca654dc3SSrivatsa S. Bhat 	policy->transition_task = current;
46912478cf0SSrivatsa S. Bhat 
47012478cf0SSrivatsa S. Bhat 	spin_unlock(&policy->transition_lock);
47112478cf0SSrivatsa S. Bhat 
47212478cf0SSrivatsa S. Bhat 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
47312478cf0SSrivatsa S. Bhat }
47412478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
47512478cf0SSrivatsa S. Bhat 
47612478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
47712478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs, int transition_failed)
47812478cf0SSrivatsa S. Bhat {
47912478cf0SSrivatsa S. Bhat 	if (unlikely(WARN_ON(!policy->transition_ongoing)))
48012478cf0SSrivatsa S. Bhat 		return;
48112478cf0SSrivatsa S. Bhat 
48212478cf0SSrivatsa S. Bhat 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
48312478cf0SSrivatsa S. Bhat 
48412478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = false;
485ca654dc3SSrivatsa S. Bhat 	policy->transition_task = NULL;
48612478cf0SSrivatsa S. Bhat 
48712478cf0SSrivatsa S. Bhat 	wake_up(&policy->transition_wait);
48812478cf0SSrivatsa S. Bhat }
48912478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
49012478cf0SSrivatsa S. Bhat 
4911da177e4SLinus Torvalds 
4921da177e4SLinus Torvalds /*********************************************************************
4931da177e4SLinus Torvalds  *                          SYSFS INTERFACE                          *
4941da177e4SLinus Torvalds  *********************************************************************/
4958a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj,
4966f19efc0SLukasz Majewski 				 struct attribute *attr, char *buf)
4976f19efc0SLukasz Majewski {
4986f19efc0SLukasz Majewski 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
4996f19efc0SLukasz Majewski }
5006f19efc0SLukasz Majewski 
5016f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
5026f19efc0SLukasz Majewski 				  const char *buf, size_t count)
5036f19efc0SLukasz Majewski {
5046f19efc0SLukasz Majewski 	int ret, enable;
5056f19efc0SLukasz Majewski 
5066f19efc0SLukasz Majewski 	ret = sscanf(buf, "%d", &enable);
5076f19efc0SLukasz Majewski 	if (ret != 1 || enable < 0 || enable > 1)
5086f19efc0SLukasz Majewski 		return -EINVAL;
5096f19efc0SLukasz Majewski 
5106f19efc0SLukasz Majewski 	if (cpufreq_boost_trigger_state(enable)) {
511e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST!\n",
512e837f9b5SJoe Perches 		       __func__, enable ? "enable" : "disable");
5136f19efc0SLukasz Majewski 		return -EINVAL;
5146f19efc0SLukasz Majewski 	}
5156f19efc0SLukasz Majewski 
516e837f9b5SJoe Perches 	pr_debug("%s: cpufreq BOOST %s\n",
517e837f9b5SJoe Perches 		 __func__, enable ? "enabled" : "disabled");
5186f19efc0SLukasz Majewski 
5196f19efc0SLukasz Majewski 	return count;
5206f19efc0SLukasz Majewski }
5216f19efc0SLukasz Majewski define_one_global_rw(boost);
5221da177e4SLinus Torvalds 
52342f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor)
5243bcb09a3SJeremy Fitzhardinge {
5253bcb09a3SJeremy Fitzhardinge 	struct cpufreq_governor *t;
5263bcb09a3SJeremy Fitzhardinge 
527f7b27061SViresh Kumar 	for_each_governor(t)
5287c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
5293bcb09a3SJeremy Fitzhardinge 			return t;
5303bcb09a3SJeremy Fitzhardinge 
5313bcb09a3SJeremy Fitzhardinge 	return NULL;
5323bcb09a3SJeremy Fitzhardinge }
5333bcb09a3SJeremy Fitzhardinge 
5341da177e4SLinus Torvalds /**
5351da177e4SLinus Torvalds  * cpufreq_parse_governor - parse a governor string
5361da177e4SLinus Torvalds  */
5371da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
5381da177e4SLinus Torvalds 				struct cpufreq_governor **governor)
5391da177e4SLinus Torvalds {
5403bcb09a3SJeremy Fitzhardinge 	int err = -EINVAL;
5413bcb09a3SJeremy Fitzhardinge 
5421c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver)
5433bcb09a3SJeremy Fitzhardinge 		goto out;
5443bcb09a3SJeremy Fitzhardinge 
5451c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
5467c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
5471da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_PERFORMANCE;
5483bcb09a3SJeremy Fitzhardinge 			err = 0;
5497c4f4539SRasmus Villemoes 		} else if (!strncasecmp(str_governor, "powersave",
550e08f5f5bSGautham R Shenoy 						CPUFREQ_NAME_LEN)) {
5511da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_POWERSAVE;
5523bcb09a3SJeremy Fitzhardinge 			err = 0;
5531da177e4SLinus Torvalds 		}
5542e1cc3a5SViresh Kumar 	} else {
5551da177e4SLinus Torvalds 		struct cpufreq_governor *t;
5563bcb09a3SJeremy Fitzhardinge 
5573fc54d37Sakpm@osdl.org 		mutex_lock(&cpufreq_governor_mutex);
5583bcb09a3SJeremy Fitzhardinge 
55942f91fa1SViresh Kumar 		t = find_governor(str_governor);
5603bcb09a3SJeremy Fitzhardinge 
561ea714970SJeremy Fitzhardinge 		if (t == NULL) {
562ea714970SJeremy Fitzhardinge 			int ret;
563ea714970SJeremy Fitzhardinge 
564ea714970SJeremy Fitzhardinge 			mutex_unlock(&cpufreq_governor_mutex);
5651a8e1463SKees Cook 			ret = request_module("cpufreq_%s", str_governor);
566ea714970SJeremy Fitzhardinge 			mutex_lock(&cpufreq_governor_mutex);
567ea714970SJeremy Fitzhardinge 
568ea714970SJeremy Fitzhardinge 			if (ret == 0)
56942f91fa1SViresh Kumar 				t = find_governor(str_governor);
570ea714970SJeremy Fitzhardinge 		}
571ea714970SJeremy Fitzhardinge 
5723bcb09a3SJeremy Fitzhardinge 		if (t != NULL) {
5731da177e4SLinus Torvalds 			*governor = t;
5743bcb09a3SJeremy Fitzhardinge 			err = 0;
5751da177e4SLinus Torvalds 		}
5763bcb09a3SJeremy Fitzhardinge 
5773bcb09a3SJeremy Fitzhardinge 		mutex_unlock(&cpufreq_governor_mutex);
5781da177e4SLinus Torvalds 	}
5791da177e4SLinus Torvalds out:
5803bcb09a3SJeremy Fitzhardinge 	return err;
5811da177e4SLinus Torvalds }
5821da177e4SLinus Torvalds 
5831da177e4SLinus Torvalds /**
584e08f5f5bSGautham R Shenoy  * cpufreq_per_cpu_attr_read() / show_##file_name() -
585e08f5f5bSGautham R Shenoy  * print out cpufreq information
5861da177e4SLinus Torvalds  *
5871da177e4SLinus Torvalds  * Write out information from cpufreq_driver->policy[cpu]; object must be
5881da177e4SLinus Torvalds  * "unsigned int".
5891da177e4SLinus Torvalds  */
5901da177e4SLinus Torvalds 
5911da177e4SLinus Torvalds #define show_one(file_name, object)			\
5921da177e4SLinus Torvalds static ssize_t show_##file_name				\
5931da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf)		\
5941da177e4SLinus Torvalds {							\
5951da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", policy->object);	\
5961da177e4SLinus Torvalds }
5971da177e4SLinus Torvalds 
5981da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq);
5991da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq);
600ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
6011da177e4SLinus Torvalds show_one(scaling_min_freq, min);
6021da177e4SLinus Torvalds show_one(scaling_max_freq, max);
603c034b02eSDirk Brandewie 
60409347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
605c034b02eSDirk Brandewie {
606c034b02eSDirk Brandewie 	ssize_t ret;
607c034b02eSDirk Brandewie 
608c034b02eSDirk Brandewie 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
609c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
610c034b02eSDirk Brandewie 	else
611c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", policy->cur);
612c034b02eSDirk Brandewie 	return ret;
613c034b02eSDirk Brandewie }
6141da177e4SLinus Torvalds 
615037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
6163a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy);
6177970e08bSThomas Renninger 
6181da177e4SLinus Torvalds /**
6191da177e4SLinus Torvalds  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
6201da177e4SLinus Torvalds  */
6211da177e4SLinus Torvalds #define store_one(file_name, object)			\
6221da177e4SLinus Torvalds static ssize_t store_##file_name					\
6231da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count)		\
6241da177e4SLinus Torvalds {									\
625619c144cSVince Hsu 	int ret, temp;							\
6261da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;				\
6271da177e4SLinus Torvalds 									\
6281da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
6291da177e4SLinus Torvalds 	if (ret)							\
6301da177e4SLinus Torvalds 		return -EINVAL;						\
6311da177e4SLinus Torvalds 									\
6321da177e4SLinus Torvalds 	ret = sscanf(buf, "%u", &new_policy.object);			\
6331da177e4SLinus Torvalds 	if (ret != 1)							\
6341da177e4SLinus Torvalds 		return -EINVAL;						\
6351da177e4SLinus Torvalds 									\
636619c144cSVince Hsu 	temp = new_policy.object;					\
637037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);		\
638619c144cSVince Hsu 	if (!ret)							\
639619c144cSVince Hsu 		policy->user_policy.object = temp;			\
6401da177e4SLinus Torvalds 									\
6411da177e4SLinus Torvalds 	return ret ? ret : count;					\
6421da177e4SLinus Torvalds }
6431da177e4SLinus Torvalds 
6441da177e4SLinus Torvalds store_one(scaling_min_freq, min);
6451da177e4SLinus Torvalds store_one(scaling_max_freq, max);
6461da177e4SLinus Torvalds 
6471da177e4SLinus Torvalds /**
6481da177e4SLinus Torvalds  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
6491da177e4SLinus Torvalds  */
650e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
651e08f5f5bSGautham R Shenoy 					char *buf)
6521da177e4SLinus Torvalds {
653d92d50a4SViresh Kumar 	unsigned int cur_freq = __cpufreq_get(policy);
6541da177e4SLinus Torvalds 	if (!cur_freq)
6551da177e4SLinus Torvalds 		return sprintf(buf, "<unknown>");
6561da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", cur_freq);
6571da177e4SLinus Torvalds }
6581da177e4SLinus Torvalds 
6591da177e4SLinus Torvalds /**
6601da177e4SLinus Torvalds  * show_scaling_governor - show the current policy for the specified CPU
6611da177e4SLinus Torvalds  */
662905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
6631da177e4SLinus Torvalds {
6641da177e4SLinus Torvalds 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
6651da177e4SLinus Torvalds 		return sprintf(buf, "powersave\n");
6661da177e4SLinus Torvalds 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
6671da177e4SLinus Torvalds 		return sprintf(buf, "performance\n");
6681da177e4SLinus Torvalds 	else if (policy->governor)
6694b972f0bSviresh kumar 		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
67029464f28SDave Jones 				policy->governor->name);
6711da177e4SLinus Torvalds 	return -EINVAL;
6721da177e4SLinus Torvalds }
6731da177e4SLinus Torvalds 
6741da177e4SLinus Torvalds /**
6751da177e4SLinus Torvalds  * store_scaling_governor - store policy for the specified CPU
6761da177e4SLinus Torvalds  */
6771da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
6781da177e4SLinus Torvalds 					const char *buf, size_t count)
6791da177e4SLinus Torvalds {
6805136fa56SSrivatsa S. Bhat 	int ret;
6811da177e4SLinus Torvalds 	char	str_governor[16];
6821da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;
6831da177e4SLinus Torvalds 
6841da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
6851da177e4SLinus Torvalds 	if (ret)
6861da177e4SLinus Torvalds 		return ret;
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds 	ret = sscanf(buf, "%15s", str_governor);
6891da177e4SLinus Torvalds 	if (ret != 1)
6901da177e4SLinus Torvalds 		return -EINVAL;
6911da177e4SLinus Torvalds 
692e08f5f5bSGautham R Shenoy 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
693e08f5f5bSGautham R Shenoy 						&new_policy.governor))
6941da177e4SLinus Torvalds 		return -EINVAL;
6951da177e4SLinus Torvalds 
696037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
6977970e08bSThomas Renninger 
6987970e08bSThomas Renninger 	policy->user_policy.policy = policy->policy;
6997970e08bSThomas Renninger 	policy->user_policy.governor = policy->governor;
7007970e08bSThomas Renninger 
701e08f5f5bSGautham R Shenoy 	if (ret)
702e08f5f5bSGautham R Shenoy 		return ret;
703e08f5f5bSGautham R Shenoy 	else
704e08f5f5bSGautham R Shenoy 		return count;
7051da177e4SLinus Torvalds }
7061da177e4SLinus Torvalds 
7071da177e4SLinus Torvalds /**
7081da177e4SLinus Torvalds  * show_scaling_driver - show the cpufreq driver currently loaded
7091da177e4SLinus Torvalds  */
7101da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
7111da177e4SLinus Torvalds {
7121c3d85ddSRafael J. Wysocki 	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
7131da177e4SLinus Torvalds }
7141da177e4SLinus Torvalds 
7151da177e4SLinus Torvalds /**
7161da177e4SLinus Torvalds  * show_scaling_available_governors - show the available CPUfreq governors
7171da177e4SLinus Torvalds  */
7181da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
7191da177e4SLinus Torvalds 						char *buf)
7201da177e4SLinus Torvalds {
7211da177e4SLinus Torvalds 	ssize_t i = 0;
7221da177e4SLinus Torvalds 	struct cpufreq_governor *t;
7231da177e4SLinus Torvalds 
7249c0ebcf7SViresh Kumar 	if (!has_target()) {
7251da177e4SLinus Torvalds 		i += sprintf(buf, "performance powersave");
7261da177e4SLinus Torvalds 		goto out;
7271da177e4SLinus Torvalds 	}
7281da177e4SLinus Torvalds 
729f7b27061SViresh Kumar 	for_each_governor(t) {
73029464f28SDave Jones 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
73129464f28SDave Jones 		    - (CPUFREQ_NAME_LEN + 2)))
7321da177e4SLinus Torvalds 			goto out;
7334b972f0bSviresh kumar 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
7341da177e4SLinus Torvalds 	}
7351da177e4SLinus Torvalds out:
7361da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7371da177e4SLinus Torvalds 	return i;
7381da177e4SLinus Torvalds }
739e8628dd0SDarrick J. Wong 
740f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
7411da177e4SLinus Torvalds {
7421da177e4SLinus Torvalds 	ssize_t i = 0;
7431da177e4SLinus Torvalds 	unsigned int cpu;
7441da177e4SLinus Torvalds 
745835481d9SRusty Russell 	for_each_cpu(cpu, mask) {
7461da177e4SLinus Torvalds 		if (i)
7471da177e4SLinus Torvalds 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
7481da177e4SLinus Torvalds 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
7491da177e4SLinus Torvalds 		if (i >= (PAGE_SIZE - 5))
7501da177e4SLinus Torvalds 			break;
7511da177e4SLinus Torvalds 	}
7521da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7531da177e4SLinus Torvalds 	return i;
7541da177e4SLinus Torvalds }
755f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
7561da177e4SLinus Torvalds 
757e8628dd0SDarrick J. Wong /**
758e8628dd0SDarrick J. Wong  * show_related_cpus - show the CPUs affected by each transition even if
759e8628dd0SDarrick J. Wong  * hw coordination is in use
760e8628dd0SDarrick J. Wong  */
761e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
762e8628dd0SDarrick J. Wong {
763f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->related_cpus, buf);
764e8628dd0SDarrick J. Wong }
765e8628dd0SDarrick J. Wong 
766e8628dd0SDarrick J. Wong /**
767e8628dd0SDarrick J. Wong  * show_affected_cpus - show the CPUs affected by each transition
768e8628dd0SDarrick J. Wong  */
769e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
770e8628dd0SDarrick J. Wong {
771f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->cpus, buf);
772e8628dd0SDarrick J. Wong }
773e8628dd0SDarrick J. Wong 
7749e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
7759e76988eSVenki Pallipadi 					const char *buf, size_t count)
7769e76988eSVenki Pallipadi {
7779e76988eSVenki Pallipadi 	unsigned int freq = 0;
7789e76988eSVenki Pallipadi 	unsigned int ret;
7799e76988eSVenki Pallipadi 
780879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->store_setspeed)
7819e76988eSVenki Pallipadi 		return -EINVAL;
7829e76988eSVenki Pallipadi 
7839e76988eSVenki Pallipadi 	ret = sscanf(buf, "%u", &freq);
7849e76988eSVenki Pallipadi 	if (ret != 1)
7859e76988eSVenki Pallipadi 		return -EINVAL;
7869e76988eSVenki Pallipadi 
7879e76988eSVenki Pallipadi 	policy->governor->store_setspeed(policy, freq);
7889e76988eSVenki Pallipadi 
7899e76988eSVenki Pallipadi 	return count;
7909e76988eSVenki Pallipadi }
7919e76988eSVenki Pallipadi 
7929e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
7939e76988eSVenki Pallipadi {
794879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->show_setspeed)
7959e76988eSVenki Pallipadi 		return sprintf(buf, "<unsupported>\n");
7969e76988eSVenki Pallipadi 
7979e76988eSVenki Pallipadi 	return policy->governor->show_setspeed(policy, buf);
7989e76988eSVenki Pallipadi }
7991da177e4SLinus Torvalds 
800e2f74f35SThomas Renninger /**
8018bf1ac72Sviresh kumar  * show_bios_limit - show the current cpufreq HW/BIOS limitation
802e2f74f35SThomas Renninger  */
803e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
804e2f74f35SThomas Renninger {
805e2f74f35SThomas Renninger 	unsigned int limit;
806e2f74f35SThomas Renninger 	int ret;
8071c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
8081c3d85ddSRafael J. Wysocki 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
809e2f74f35SThomas Renninger 		if (!ret)
810e2f74f35SThomas Renninger 			return sprintf(buf, "%u\n", limit);
811e2f74f35SThomas Renninger 	}
812e2f74f35SThomas Renninger 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
813e2f74f35SThomas Renninger }
814e2f74f35SThomas Renninger 
8156dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
8166dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq);
8176dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq);
8186dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency);
8196dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors);
8206dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver);
8216dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq);
8226dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit);
8236dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus);
8246dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus);
8256dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq);
8266dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq);
8276dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor);
8286dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed);
8291da177e4SLinus Torvalds 
8301da177e4SLinus Torvalds static struct attribute *default_attrs[] = {
8311da177e4SLinus Torvalds 	&cpuinfo_min_freq.attr,
8321da177e4SLinus Torvalds 	&cpuinfo_max_freq.attr,
833ed129784SThomas Renninger 	&cpuinfo_transition_latency.attr,
8341da177e4SLinus Torvalds 	&scaling_min_freq.attr,
8351da177e4SLinus Torvalds 	&scaling_max_freq.attr,
8361da177e4SLinus Torvalds 	&affected_cpus.attr,
837e8628dd0SDarrick J. Wong 	&related_cpus.attr,
8381da177e4SLinus Torvalds 	&scaling_governor.attr,
8391da177e4SLinus Torvalds 	&scaling_driver.attr,
8401da177e4SLinus Torvalds 	&scaling_available_governors.attr,
8419e76988eSVenki Pallipadi 	&scaling_setspeed.attr,
8421da177e4SLinus Torvalds 	NULL
8431da177e4SLinus Torvalds };
8441da177e4SLinus Torvalds 
8451da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
8461da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr)
8471da177e4SLinus Torvalds 
8481da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
8491da177e4SLinus Torvalds {
8501da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8511da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
8521b750e3bSViresh Kumar 	ssize_t ret;
8536eed9404SViresh Kumar 
8546eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
8551b750e3bSViresh Kumar 		return -EINVAL;
8565a01f2e8SVenkatesh Pallipadi 
857ad7722daSviresh kumar 	down_read(&policy->rwsem);
8585a01f2e8SVenkatesh Pallipadi 
859e08f5f5bSGautham R Shenoy 	if (fattr->show)
860e08f5f5bSGautham R Shenoy 		ret = fattr->show(policy, buf);
861e08f5f5bSGautham R Shenoy 	else
862e08f5f5bSGautham R Shenoy 		ret = -EIO;
863e08f5f5bSGautham R Shenoy 
864ad7722daSviresh kumar 	up_read(&policy->rwsem);
8656eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
8661b750e3bSViresh Kumar 
8671da177e4SLinus Torvalds 	return ret;
8681da177e4SLinus Torvalds }
8691da177e4SLinus Torvalds 
8701da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr,
8711da177e4SLinus Torvalds 		     const char *buf, size_t count)
8721da177e4SLinus Torvalds {
8731da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8741da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
875a07530b4SDave Jones 	ssize_t ret = -EINVAL;
8766eed9404SViresh Kumar 
8774f750c93SSrivatsa S. Bhat 	get_online_cpus();
8784f750c93SSrivatsa S. Bhat 
8794f750c93SSrivatsa S. Bhat 	if (!cpu_online(policy->cpu))
8804f750c93SSrivatsa S. Bhat 		goto unlock;
8814f750c93SSrivatsa S. Bhat 
8826eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
8834f750c93SSrivatsa S. Bhat 		goto unlock;
8845a01f2e8SVenkatesh Pallipadi 
885ad7722daSviresh kumar 	down_write(&policy->rwsem);
8865a01f2e8SVenkatesh Pallipadi 
88711e584cfSViresh Kumar 	/* Updating inactive policies is invalid, so avoid doing that. */
88811e584cfSViresh Kumar 	if (unlikely(policy_is_inactive(policy))) {
88911e584cfSViresh Kumar 		ret = -EBUSY;
89011e584cfSViresh Kumar 		goto unlock_policy_rwsem;
89111e584cfSViresh Kumar 	}
89211e584cfSViresh Kumar 
893e08f5f5bSGautham R Shenoy 	if (fattr->store)
894e08f5f5bSGautham R Shenoy 		ret = fattr->store(policy, buf, count);
895e08f5f5bSGautham R Shenoy 	else
896e08f5f5bSGautham R Shenoy 		ret = -EIO;
897e08f5f5bSGautham R Shenoy 
89811e584cfSViresh Kumar unlock_policy_rwsem:
899ad7722daSviresh kumar 	up_write(&policy->rwsem);
9006eed9404SViresh Kumar 
9016eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
9024f750c93SSrivatsa S. Bhat unlock:
9034f750c93SSrivatsa S. Bhat 	put_online_cpus();
9044f750c93SSrivatsa S. Bhat 
9051da177e4SLinus Torvalds 	return ret;
9061da177e4SLinus Torvalds }
9071da177e4SLinus Torvalds 
9081da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj)
9091da177e4SLinus Torvalds {
9101da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
9112d06d8c4SDominik Brodowski 	pr_debug("last reference is dropped\n");
9121da177e4SLinus Torvalds 	complete(&policy->kobj_unregister);
9131da177e4SLinus Torvalds }
9141da177e4SLinus Torvalds 
91552cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = {
9161da177e4SLinus Torvalds 	.show	= show,
9171da177e4SLinus Torvalds 	.store	= store,
9181da177e4SLinus Torvalds };
9191da177e4SLinus Torvalds 
9201da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = {
9211da177e4SLinus Torvalds 	.sysfs_ops	= &sysfs_ops,
9221da177e4SLinus Torvalds 	.default_attrs	= default_attrs,
9231da177e4SLinus Torvalds 	.release	= cpufreq_sysfs_release,
9241da177e4SLinus Torvalds };
9251da177e4SLinus Torvalds 
9262361be23SViresh Kumar struct kobject *cpufreq_global_kobject;
9272361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject);
9282361be23SViresh Kumar 
9292361be23SViresh Kumar static int cpufreq_global_kobject_usage;
9302361be23SViresh Kumar 
9312361be23SViresh Kumar int cpufreq_get_global_kobject(void)
9322361be23SViresh Kumar {
9332361be23SViresh Kumar 	if (!cpufreq_global_kobject_usage++)
9342361be23SViresh Kumar 		return kobject_add(cpufreq_global_kobject,
9352361be23SViresh Kumar 				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
9362361be23SViresh Kumar 
9372361be23SViresh Kumar 	return 0;
9382361be23SViresh Kumar }
9392361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject);
9402361be23SViresh Kumar 
9412361be23SViresh Kumar void cpufreq_put_global_kobject(void)
9422361be23SViresh Kumar {
9432361be23SViresh Kumar 	if (!--cpufreq_global_kobject_usage)
9442361be23SViresh Kumar 		kobject_del(cpufreq_global_kobject);
9452361be23SViresh Kumar }
9462361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject);
9472361be23SViresh Kumar 
9482361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr)
9492361be23SViresh Kumar {
9502361be23SViresh Kumar 	int ret = cpufreq_get_global_kobject();
9512361be23SViresh Kumar 
9522361be23SViresh Kumar 	if (!ret) {
9532361be23SViresh Kumar 		ret = sysfs_create_file(cpufreq_global_kobject, attr);
9542361be23SViresh Kumar 		if (ret)
9552361be23SViresh Kumar 			cpufreq_put_global_kobject();
9562361be23SViresh Kumar 	}
9572361be23SViresh Kumar 
9582361be23SViresh Kumar 	return ret;
9592361be23SViresh Kumar }
9602361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file);
9612361be23SViresh Kumar 
9622361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr)
9632361be23SViresh Kumar {
9642361be23SViresh Kumar 	sysfs_remove_file(cpufreq_global_kobject, attr);
9652361be23SViresh Kumar 	cpufreq_put_global_kobject();
9662361be23SViresh Kumar }
9672361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
9682361be23SViresh Kumar 
96987549141SViresh Kumar static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
97087549141SViresh Kumar {
97187549141SViresh Kumar 	struct device *cpu_dev;
97287549141SViresh Kumar 
97387549141SViresh Kumar 	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
97487549141SViresh Kumar 
97587549141SViresh Kumar 	if (!policy)
97687549141SViresh Kumar 		return 0;
97787549141SViresh Kumar 
97887549141SViresh Kumar 	cpu_dev = get_cpu_device(cpu);
97987549141SViresh Kumar 	if (WARN_ON(!cpu_dev))
98087549141SViresh Kumar 		return 0;
98187549141SViresh Kumar 
98287549141SViresh Kumar 	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
98387549141SViresh Kumar }
98487549141SViresh Kumar 
98587549141SViresh Kumar static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
98687549141SViresh Kumar {
98787549141SViresh Kumar 	struct device *cpu_dev;
98887549141SViresh Kumar 
98987549141SViresh Kumar 	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
99087549141SViresh Kumar 
99187549141SViresh Kumar 	cpu_dev = get_cpu_device(cpu);
99287549141SViresh Kumar 	if (WARN_ON(!cpu_dev))
99387549141SViresh Kumar 		return;
99487549141SViresh Kumar 
99587549141SViresh Kumar 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
99687549141SViresh Kumar }
99787549141SViresh Kumar 
99887549141SViresh Kumar /* Add/remove symlinks for all related CPUs */
999308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
100019d6f7ecSDave Jones {
100119d6f7ecSDave Jones 	unsigned int j;
100219d6f7ecSDave Jones 	int ret = 0;
100319d6f7ecSDave Jones 
100487549141SViresh Kumar 	/* Some related CPUs might not be present (physically hotplugged) */
100587549141SViresh Kumar 	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
10069d16f207SSaravana Kannan 		if (j == policy->kobj_cpu)
100719d6f7ecSDave Jones 			continue;
100819d6f7ecSDave Jones 
100987549141SViresh Kumar 		ret = add_cpu_dev_symlink(policy, j);
101071c3461eSRafael J. Wysocki 		if (ret)
101171c3461eSRafael J. Wysocki 			break;
101219d6f7ecSDave Jones 	}
101387549141SViresh Kumar 
101419d6f7ecSDave Jones 	return ret;
101519d6f7ecSDave Jones }
101619d6f7ecSDave Jones 
101787549141SViresh Kumar static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
101887549141SViresh Kumar {
101987549141SViresh Kumar 	unsigned int j;
102087549141SViresh Kumar 
102187549141SViresh Kumar 	/* Some related CPUs might not be present (physically hotplugged) */
102287549141SViresh Kumar 	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
102387549141SViresh Kumar 		if (j == policy->kobj_cpu)
102487549141SViresh Kumar 			continue;
102587549141SViresh Kumar 
102687549141SViresh Kumar 		remove_cpu_dev_symlink(policy, j);
102787549141SViresh Kumar 	}
102887549141SViresh Kumar }
102987549141SViresh Kumar 
1030308b60e7SViresh Kumar static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
10318a25a2fdSKay Sievers 				     struct device *dev)
1032909a694eSDave Jones {
1033909a694eSDave Jones 	struct freq_attr **drv_attr;
1034909a694eSDave Jones 	int ret = 0;
1035909a694eSDave Jones 
1036909a694eSDave Jones 	/* set up files for this cpu device */
10371c3d85ddSRafael J. Wysocki 	drv_attr = cpufreq_driver->attr;
1038f13f1184SViresh Kumar 	while (drv_attr && *drv_attr) {
1039909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1040909a694eSDave Jones 		if (ret)
10416d4e81edSTomeu Vizoso 			return ret;
1042909a694eSDave Jones 		drv_attr++;
1043909a694eSDave Jones 	}
10441c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->get) {
1045909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1046909a694eSDave Jones 		if (ret)
10476d4e81edSTomeu Vizoso 			return ret;
1048909a694eSDave Jones 	}
1049c034b02eSDirk Brandewie 
1050909a694eSDave Jones 	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1051909a694eSDave Jones 	if (ret)
10526d4e81edSTomeu Vizoso 		return ret;
1053c034b02eSDirk Brandewie 
10541c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
1055e2f74f35SThomas Renninger 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1056e2f74f35SThomas Renninger 		if (ret)
10576d4e81edSTomeu Vizoso 			return ret;
1058e2f74f35SThomas Renninger 	}
1059909a694eSDave Jones 
10606d4e81edSTomeu Vizoso 	return cpufreq_add_dev_symlink(policy);
1061e18f1682SSrivatsa S. Bhat }
1062e18f1682SSrivatsa S. Bhat 
1063e18f1682SSrivatsa S. Bhat static void cpufreq_init_policy(struct cpufreq_policy *policy)
1064e18f1682SSrivatsa S. Bhat {
10656e2c89d1Sviresh kumar 	struct cpufreq_governor *gov = NULL;
1066e18f1682SSrivatsa S. Bhat 	struct cpufreq_policy new_policy;
1067e18f1682SSrivatsa S. Bhat 	int ret = 0;
1068e18f1682SSrivatsa S. Bhat 
1069d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
1070a27a9ab7SJason Baron 
10716e2c89d1Sviresh kumar 	/* Update governor of new_policy to the governor used before hotplug */
10724573237bSViresh Kumar 	gov = find_governor(policy->last_governor);
10736e2c89d1Sviresh kumar 	if (gov)
10746e2c89d1Sviresh kumar 		pr_debug("Restoring governor %s for cpu %d\n",
10756e2c89d1Sviresh kumar 				policy->governor->name, policy->cpu);
10766e2c89d1Sviresh kumar 	else
10776e2c89d1Sviresh kumar 		gov = CPUFREQ_DEFAULT_GOVERNOR;
10786e2c89d1Sviresh kumar 
10796e2c89d1Sviresh kumar 	new_policy.governor = gov;
10806e2c89d1Sviresh kumar 
1081a27a9ab7SJason Baron 	/* Use the default policy if its valid. */
1082a27a9ab7SJason Baron 	if (cpufreq_driver->setpolicy)
10836e2c89d1Sviresh kumar 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1084ecf7e461SDave Jones 
1085ecf7e461SDave Jones 	/* set default policy */
1086037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
1087ecf7e461SDave Jones 	if (ret) {
10882d06d8c4SDominik Brodowski 		pr_debug("setting policy failed\n");
10891c3d85ddSRafael J. Wysocki 		if (cpufreq_driver->exit)
10901c3d85ddSRafael J. Wysocki 			cpufreq_driver->exit(policy);
1091ecf7e461SDave Jones 	}
1092909a694eSDave Jones }
1093909a694eSDave Jones 
1094d8d3b471SViresh Kumar static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
109542f921a6SViresh Kumar 				  unsigned int cpu, struct device *dev)
1096fcf80582SViresh Kumar {
10979c0ebcf7SViresh Kumar 	int ret = 0;
1098fcf80582SViresh Kumar 
1099bb29ae15SViresh Kumar 	/* Has this CPU been taken care of already? */
1100bb29ae15SViresh Kumar 	if (cpumask_test_cpu(cpu, policy->cpus))
1101bb29ae15SViresh Kumar 		return 0;
1102bb29ae15SViresh Kumar 
11039c0ebcf7SViresh Kumar 	if (has_target()) {
11043de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
11053de9bdebSViresh Kumar 		if (ret) {
11063de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
11073de9bdebSViresh Kumar 			return ret;
11083de9bdebSViresh Kumar 		}
11093de9bdebSViresh Kumar 	}
1110fcf80582SViresh Kumar 
1111ad7722daSviresh kumar 	down_write(&policy->rwsem);
1112fcf80582SViresh Kumar 	cpumask_set_cpu(cpu, policy->cpus);
1113ad7722daSviresh kumar 	up_write(&policy->rwsem);
11142eaa3e2dSViresh Kumar 
11159c0ebcf7SViresh Kumar 	if (has_target()) {
1116e5c87b76SStratos Karafotis 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1117e5c87b76SStratos Karafotis 		if (!ret)
1118e5c87b76SStratos Karafotis 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1119e5c87b76SStratos Karafotis 
1120e5c87b76SStratos Karafotis 		if (ret) {
11213de9bdebSViresh Kumar 			pr_err("%s: Failed to start governor\n", __func__);
11223de9bdebSViresh Kumar 			return ret;
11233de9bdebSViresh Kumar 		}
1124820c6ca2SViresh Kumar 	}
1125fcf80582SViresh Kumar 
112687549141SViresh Kumar 	return 0;
1127fcf80582SViresh Kumar }
11281da177e4SLinus Torvalds 
11298414809cSSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
11308414809cSSrivatsa S. Bhat {
11318414809cSSrivatsa S. Bhat 	struct cpufreq_policy *policy;
11328414809cSSrivatsa S. Bhat 	unsigned long flags;
11338414809cSSrivatsa S. Bhat 
113444871c9cSLan Tianyu 	read_lock_irqsave(&cpufreq_driver_lock, flags);
11353914d379SViresh Kumar 	policy = per_cpu(cpufreq_cpu_data, cpu);
113644871c9cSLan Tianyu 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
11378414809cSSrivatsa S. Bhat 
11383914d379SViresh Kumar 	if (likely(policy)) {
11393914d379SViresh Kumar 		/* Policy should be inactive here */
11403914d379SViresh Kumar 		WARN_ON(!policy_is_inactive(policy));
114137829029SViresh Kumar 
114237829029SViresh Kumar 		down_write(&policy->rwsem);
114337829029SViresh Kumar 		policy->cpu = cpu;
114435afd02eSViresh Kumar 		policy->governor = NULL;
114537829029SViresh Kumar 		up_write(&policy->rwsem);
11463914d379SViresh Kumar 	}
11476e2c89d1Sviresh kumar 
11488414809cSSrivatsa S. Bhat 	return policy;
11498414809cSSrivatsa S. Bhat }
11508414809cSSrivatsa S. Bhat 
11512fc3384dSViresh Kumar static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1152e9698cc5SSrivatsa S. Bhat {
1153e9698cc5SSrivatsa S. Bhat 	struct cpufreq_policy *policy;
11542fc3384dSViresh Kumar 	int ret;
1155e9698cc5SSrivatsa S. Bhat 
1156e9698cc5SSrivatsa S. Bhat 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1157e9698cc5SSrivatsa S. Bhat 	if (!policy)
1158e9698cc5SSrivatsa S. Bhat 		return NULL;
1159e9698cc5SSrivatsa S. Bhat 
1160e9698cc5SSrivatsa S. Bhat 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1161e9698cc5SSrivatsa S. Bhat 		goto err_free_policy;
1162e9698cc5SSrivatsa S. Bhat 
1163e9698cc5SSrivatsa S. Bhat 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164e9698cc5SSrivatsa S. Bhat 		goto err_free_cpumask;
1165e9698cc5SSrivatsa S. Bhat 
11662fc3384dSViresh Kumar 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
11672fc3384dSViresh Kumar 				   "cpufreq");
11682fc3384dSViresh Kumar 	if (ret) {
11692fc3384dSViresh Kumar 		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
11702fc3384dSViresh Kumar 		goto err_free_rcpumask;
11712fc3384dSViresh Kumar 	}
11722fc3384dSViresh Kumar 
1173c88a1f8bSLukasz Majewski 	INIT_LIST_HEAD(&policy->policy_list);
1174ad7722daSviresh kumar 	init_rwsem(&policy->rwsem);
117512478cf0SSrivatsa S. Bhat 	spin_lock_init(&policy->transition_lock);
117612478cf0SSrivatsa S. Bhat 	init_waitqueue_head(&policy->transition_wait);
1177818c5712SViresh Kumar 	init_completion(&policy->kobj_unregister);
1178818c5712SViresh Kumar 	INIT_WORK(&policy->update, handle_update);
1179ad7722daSviresh kumar 
11802fc3384dSViresh Kumar 	policy->cpu = dev->id;
118187549141SViresh Kumar 
118287549141SViresh Kumar 	/* Set this once on allocation */
11832fc3384dSViresh Kumar 	policy->kobj_cpu = dev->id;
118487549141SViresh Kumar 
1185e9698cc5SSrivatsa S. Bhat 	return policy;
1186e9698cc5SSrivatsa S. Bhat 
11872fc3384dSViresh Kumar err_free_rcpumask:
11882fc3384dSViresh Kumar 	free_cpumask_var(policy->related_cpus);
1189e9698cc5SSrivatsa S. Bhat err_free_cpumask:
1190e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1191e9698cc5SSrivatsa S. Bhat err_free_policy:
1192e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1193e9698cc5SSrivatsa S. Bhat 
1194e9698cc5SSrivatsa S. Bhat 	return NULL;
1195e9698cc5SSrivatsa S. Bhat }
1196e9698cc5SSrivatsa S. Bhat 
11972fc3384dSViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
119842f921a6SViresh Kumar {
119942f921a6SViresh Kumar 	struct kobject *kobj;
120042f921a6SViresh Kumar 	struct completion *cmp;
120142f921a6SViresh Kumar 
12022fc3384dSViresh Kumar 	if (notify)
1203fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1204fcd7af91SViresh Kumar 					     CPUFREQ_REMOVE_POLICY, policy);
1205fcd7af91SViresh Kumar 
120687549141SViresh Kumar 	down_write(&policy->rwsem);
120787549141SViresh Kumar 	cpufreq_remove_dev_symlink(policy);
120842f921a6SViresh Kumar 	kobj = &policy->kobj;
120942f921a6SViresh Kumar 	cmp = &policy->kobj_unregister;
121087549141SViresh Kumar 	up_write(&policy->rwsem);
121142f921a6SViresh Kumar 	kobject_put(kobj);
121242f921a6SViresh Kumar 
121342f921a6SViresh Kumar 	/*
121442f921a6SViresh Kumar 	 * We need to make sure that the underlying kobj is
121542f921a6SViresh Kumar 	 * actually not referenced anymore by anybody before we
121642f921a6SViresh Kumar 	 * proceed with unloading.
121742f921a6SViresh Kumar 	 */
121842f921a6SViresh Kumar 	pr_debug("waiting for dropping of refcount\n");
121942f921a6SViresh Kumar 	wait_for_completion(cmp);
122042f921a6SViresh Kumar 	pr_debug("wait complete\n");
122142f921a6SViresh Kumar }
122242f921a6SViresh Kumar 
12233654c5ccSViresh Kumar static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1224e9698cc5SSrivatsa S. Bhat {
1225988bed09SViresh Kumar 	unsigned long flags;
1226988bed09SViresh Kumar 	int cpu;
1227988bed09SViresh Kumar 
1228988bed09SViresh Kumar 	/* Remove policy from list */
1229988bed09SViresh Kumar 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1230988bed09SViresh Kumar 	list_del(&policy->policy_list);
1231988bed09SViresh Kumar 
1232988bed09SViresh Kumar 	for_each_cpu(cpu, policy->related_cpus)
1233988bed09SViresh Kumar 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
1234988bed09SViresh Kumar 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1235988bed09SViresh Kumar 
12363654c5ccSViresh Kumar 	cpufreq_policy_put_kobj(policy, notify);
1237e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->related_cpus);
1238e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1239e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1240e9698cc5SSrivatsa S. Bhat }
1241e9698cc5SSrivatsa S. Bhat 
124223faf0b7SViresh Kumar /**
124323faf0b7SViresh Kumar  * cpufreq_add_dev - add a CPU device
124423faf0b7SViresh Kumar  *
124523faf0b7SViresh Kumar  * Adds the cpufreq interface for a CPU device.
124623faf0b7SViresh Kumar  *
124723faf0b7SViresh Kumar  * The Oracle says: try running cpufreq registration/unregistration concurrently
124823faf0b7SViresh Kumar  * with with cpu hotplugging and all hell will break loose. Tried to clean this
124923faf0b7SViresh Kumar  * mess up, but more thorough testing is needed. - Mathieu
125023faf0b7SViresh Kumar  */
125123faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
12521da177e4SLinus Torvalds {
1253fcf80582SViresh Kumar 	unsigned int j, cpu = dev->id;
125465922465SViresh Kumar 	int ret = -ENOMEM;
12557f0c020aSViresh Kumar 	struct cpufreq_policy *policy;
12561da177e4SLinus Torvalds 	unsigned long flags;
125787549141SViresh Kumar 	bool recover_policy = !sif;
1258c32b6b8eSAshok Raj 
12592d06d8c4SDominik Brodowski 	pr_debug("adding CPU %u\n", cpu);
12601da177e4SLinus Torvalds 
126187549141SViresh Kumar 	/*
126287549141SViresh Kumar 	 * Only possible if 'cpu' wasn't physically present earlier and we are
126387549141SViresh Kumar 	 * here from subsys_interface add callback. A hotplug notifier will
126487549141SViresh Kumar 	 * follow and we will handle it like logical CPU hotplug then. For now,
126587549141SViresh Kumar 	 * just create the sysfs link.
126687549141SViresh Kumar 	 */
126787549141SViresh Kumar 	if (cpu_is_offline(cpu))
126887549141SViresh Kumar 		return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
126987549141SViresh Kumar 
12706eed9404SViresh Kumar 	if (!down_read_trylock(&cpufreq_rwsem))
12716eed9404SViresh Kumar 		return 0;
12726eed9404SViresh Kumar 
1273bb29ae15SViresh Kumar 	/* Check if this CPU already has a policy to manage it */
12749104bb26SViresh Kumar 	policy = per_cpu(cpufreq_cpu_data, cpu);
12759104bb26SViresh Kumar 	if (policy && !policy_is_inactive(policy)) {
12769104bb26SViresh Kumar 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
12777f0c020aSViresh Kumar 		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
12786eed9404SViresh Kumar 		up_read(&cpufreq_rwsem);
12796eed9404SViresh Kumar 		return ret;
1280fcf80582SViresh Kumar 	}
12811da177e4SLinus Torvalds 
128272368d12SRafael J. Wysocki 	/*
128372368d12SRafael J. Wysocki 	 * Restore the saved policy when doing light-weight init and fall back
128472368d12SRafael J. Wysocki 	 * to the full init if that fails.
128572368d12SRafael J. Wysocki 	 */
128696bbbe4aSViresh Kumar 	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
128772368d12SRafael J. Wysocki 	if (!policy) {
128896bbbe4aSViresh Kumar 		recover_policy = false;
12892fc3384dSViresh Kumar 		policy = cpufreq_policy_alloc(dev);
1290059019a3SDave Jones 		if (!policy)
1291*8101f997SViresh Kumar 			goto out_release_rwsem;
129272368d12SRafael J. Wysocki 	}
12930d66b91eSSrivatsa S. Bhat 
1294835481d9SRusty Russell 	cpumask_copy(policy->cpus, cpumask_of(cpu));
12951da177e4SLinus Torvalds 
12961da177e4SLinus Torvalds 	/* call driver. From then on the cpufreq must be able
12971da177e4SLinus Torvalds 	 * to accept all calls to ->verify and ->setpolicy for this CPU
12981da177e4SLinus Torvalds 	 */
12991c3d85ddSRafael J. Wysocki 	ret = cpufreq_driver->init(policy);
13001da177e4SLinus Torvalds 	if (ret) {
13012d06d8c4SDominik Brodowski 		pr_debug("initialization failed\n");
1302*8101f997SViresh Kumar 		goto out_free_policy;
13031da177e4SLinus Torvalds 	}
1304643ae6e8SViresh Kumar 
13056d4e81edSTomeu Vizoso 	down_write(&policy->rwsem);
13066d4e81edSTomeu Vizoso 
13075a7e56a5SViresh Kumar 	/* related cpus should atleast have policy->cpus */
13085a7e56a5SViresh Kumar 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
13095a7e56a5SViresh Kumar 
13105a7e56a5SViresh Kumar 	/*
13115a7e56a5SViresh Kumar 	 * affected cpus must always be the one, which are online. We aren't
13125a7e56a5SViresh Kumar 	 * managing offline cpus here.
13135a7e56a5SViresh Kumar 	 */
13145a7e56a5SViresh Kumar 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
13155a7e56a5SViresh Kumar 
131696bbbe4aSViresh Kumar 	if (!recover_policy) {
13175a7e56a5SViresh Kumar 		policy->user_policy.min = policy->min;
13185a7e56a5SViresh Kumar 		policy->user_policy.max = policy->max;
13196d4e81edSTomeu Vizoso 
1320652ed95dSViresh Kumar 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1321988bed09SViresh Kumar 		for_each_cpu(j, policy->related_cpus)
1322652ed95dSViresh Kumar 			per_cpu(cpufreq_cpu_data, j) = policy;
1323652ed95dSViresh Kumar 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1324988bed09SViresh Kumar 	}
1325652ed95dSViresh Kumar 
13262ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1327da60ce9fSViresh Kumar 		policy->cur = cpufreq_driver->get(policy->cpu);
1328da60ce9fSViresh Kumar 		if (!policy->cur) {
1329da60ce9fSViresh Kumar 			pr_err("%s: ->get() failed\n", __func__);
1330*8101f997SViresh Kumar 			goto out_exit_policy;
1331da60ce9fSViresh Kumar 		}
1332da60ce9fSViresh Kumar 	}
1333da60ce9fSViresh Kumar 
1334d3916691SViresh Kumar 	/*
1335d3916691SViresh Kumar 	 * Sometimes boot loaders set CPU frequency to a value outside of
1336d3916691SViresh Kumar 	 * frequency table present with cpufreq core. In such cases CPU might be
1337d3916691SViresh Kumar 	 * unstable if it has to run on that frequency for long duration of time
1338d3916691SViresh Kumar 	 * and so its better to set it to a frequency which is specified in
1339d3916691SViresh Kumar 	 * freq-table. This also makes cpufreq stats inconsistent as
1340d3916691SViresh Kumar 	 * cpufreq-stats would fail to register because current frequency of CPU
1341d3916691SViresh Kumar 	 * isn't found in freq-table.
1342d3916691SViresh Kumar 	 *
1343d3916691SViresh Kumar 	 * Because we don't want this change to effect boot process badly, we go
1344d3916691SViresh Kumar 	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1345d3916691SViresh Kumar 	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1346d3916691SViresh Kumar 	 * is initialized to zero).
1347d3916691SViresh Kumar 	 *
1348d3916691SViresh Kumar 	 * We are passing target-freq as "policy->cur - 1" otherwise
1349d3916691SViresh Kumar 	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1350d3916691SViresh Kumar 	 * equal to target-freq.
1351d3916691SViresh Kumar 	 */
1352d3916691SViresh Kumar 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1353d3916691SViresh Kumar 	    && has_target()) {
1354d3916691SViresh Kumar 		/* Are we running at unknown frequency ? */
1355d3916691SViresh Kumar 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1356d3916691SViresh Kumar 		if (ret == -EINVAL) {
1357d3916691SViresh Kumar 			/* Warn user and fix it */
1358d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1359d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1360d3916691SViresh Kumar 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1361d3916691SViresh Kumar 				CPUFREQ_RELATION_L);
1362d3916691SViresh Kumar 
1363d3916691SViresh Kumar 			/*
1364d3916691SViresh Kumar 			 * Reaching here after boot in a few seconds may not
1365d3916691SViresh Kumar 			 * mean that system will remain stable at "unknown"
1366d3916691SViresh Kumar 			 * frequency for longer duration. Hence, a BUG_ON().
1367d3916691SViresh Kumar 			 */
1368d3916691SViresh Kumar 			BUG_ON(ret);
1369d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1370d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1371d3916691SViresh Kumar 		}
1372d3916691SViresh Kumar 	}
1373d3916691SViresh Kumar 
1374a1531acdSThomas Renninger 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1375a1531acdSThomas Renninger 				     CPUFREQ_START, policy);
1376a1531acdSThomas Renninger 
137796bbbe4aSViresh Kumar 	if (!recover_policy) {
1378308b60e7SViresh Kumar 		ret = cpufreq_add_dev_interface(policy, dev);
137919d6f7ecSDave Jones 		if (ret)
1380*8101f997SViresh Kumar 			goto out_exit_policy;
1381fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1382fcd7af91SViresh Kumar 				CPUFREQ_CREATE_POLICY, policy);
1383c88a1f8bSLukasz Majewski 
1384c88a1f8bSLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1385c88a1f8bSLukasz Majewski 		list_add(&policy->policy_list, &cpufreq_policy_list);
1386c88a1f8bSLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1387988bed09SViresh Kumar 	}
13888ff69732SDave Jones 
1389e18f1682SSrivatsa S. Bhat 	cpufreq_init_policy(policy);
1390e18f1682SSrivatsa S. Bhat 
139196bbbe4aSViresh Kumar 	if (!recover_policy) {
139208fd8c1cSViresh Kumar 		policy->user_policy.policy = policy->policy;
139308fd8c1cSViresh Kumar 		policy->user_policy.governor = policy->governor;
139408fd8c1cSViresh Kumar 	}
13954e97b631SViresh Kumar 	up_write(&policy->rwsem);
139608fd8c1cSViresh Kumar 
1397038c5b3eSGreg Kroah-Hartman 	kobject_uevent(&policy->kobj, KOBJ_ADD);
13987c45cf31SViresh Kumar 
13996eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
14006eed9404SViresh Kumar 
14017c45cf31SViresh Kumar 	/* Callback for handling stuff after policy is ready */
14027c45cf31SViresh Kumar 	if (cpufreq_driver->ready)
14037c45cf31SViresh Kumar 		cpufreq_driver->ready(policy);
14047c45cf31SViresh Kumar 
14052d06d8c4SDominik Brodowski 	pr_debug("initialization complete\n");
14061da177e4SLinus Torvalds 
14071da177e4SLinus Torvalds 	return 0;
14081da177e4SLinus Torvalds 
1409*8101f997SViresh Kumar out_exit_policy:
14107106e02bSPrarit Bhargava 	up_write(&policy->rwsem);
14117106e02bSPrarit Bhargava 
1412da60ce9fSViresh Kumar 	if (cpufreq_driver->exit)
1413da60ce9fSViresh Kumar 		cpufreq_driver->exit(policy);
1414*8101f997SViresh Kumar out_free_policy:
14153654c5ccSViresh Kumar 	cpufreq_policy_free(policy, recover_policy);
1416*8101f997SViresh Kumar out_release_rwsem:
14176eed9404SViresh Kumar 	up_read(&cpufreq_rwsem);
14186eed9404SViresh Kumar 
14191da177e4SLinus Torvalds 	return ret;
14201da177e4SLinus Torvalds }
14211da177e4SLinus Torvalds 
1422cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_prepare(struct device *dev,
142396bbbe4aSViresh Kumar 					struct subsys_interface *sif)
14241da177e4SLinus Torvalds {
14259591becbSViresh Kumar 	unsigned int cpu = dev->id;
14269591becbSViresh Kumar 	int ret = 0;
14273a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
14281da177e4SLinus Torvalds 
1429b8eed8afSViresh Kumar 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
14301da177e4SLinus Torvalds 
1431988bed09SViresh Kumar 	policy = cpufreq_cpu_get_raw(cpu);
14323a3e9e06SViresh Kumar 	if (!policy) {
1433b8eed8afSViresh Kumar 		pr_debug("%s: No cpu_data found\n", __func__);
14341da177e4SLinus Torvalds 		return -EINVAL;
14351da177e4SLinus Torvalds 	}
14361da177e4SLinus Torvalds 
14379c0ebcf7SViresh Kumar 	if (has_target()) {
14383de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
14393de9bdebSViresh Kumar 		if (ret) {
14403de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
14413de9bdebSViresh Kumar 			return ret;
14423de9bdebSViresh Kumar 		}
1443db5f2995SViresh Kumar 	}
14441da177e4SLinus Torvalds 
14454573237bSViresh Kumar 	down_write(&policy->rwsem);
14469591becbSViresh Kumar 	cpumask_clear_cpu(cpu, policy->cpus);
14474573237bSViresh Kumar 
14489591becbSViresh Kumar 	if (policy_is_inactive(policy)) {
14499591becbSViresh Kumar 		if (has_target())
14504573237bSViresh Kumar 			strncpy(policy->last_governor, policy->governor->name,
14514573237bSViresh Kumar 				CPUFREQ_NAME_LEN);
14529591becbSViresh Kumar 	} else if (cpu == policy->cpu) {
14539591becbSViresh Kumar 		/* Nominate new CPU */
14549591becbSViresh Kumar 		policy->cpu = cpumask_any(policy->cpus);
14559591becbSViresh Kumar 	}
14564573237bSViresh Kumar 	up_write(&policy->rwsem);
14571da177e4SLinus Torvalds 
14589591becbSViresh Kumar 	/* Start governor again for active policy */
14599591becbSViresh Kumar 	if (!policy_is_inactive(policy)) {
14609591becbSViresh Kumar 		if (has_target()) {
14619591becbSViresh Kumar 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
14629591becbSViresh Kumar 			if (!ret)
14639591becbSViresh Kumar 				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
146487549141SViresh Kumar 
14659591becbSViresh Kumar 			if (ret)
14669591becbSViresh Kumar 				pr_err("%s: Failed to start governor\n", __func__);
14679591becbSViresh Kumar 		}
14689591becbSViresh Kumar 	} else if (cpufreq_driver->stop_cpu) {
1469367dc4aaSDirk Brandewie 		cpufreq_driver->stop_cpu(policy);
14709591becbSViresh Kumar 	}
1471b8eed8afSViresh Kumar 
14729591becbSViresh Kumar 	return ret;
1473cedb70afSSrivatsa S. Bhat }
1474cedb70afSSrivatsa S. Bhat 
1475cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev,
147696bbbe4aSViresh Kumar 				       struct subsys_interface *sif)
1477cedb70afSSrivatsa S. Bhat {
1478988bed09SViresh Kumar 	unsigned int cpu = dev->id;
1479cedb70afSSrivatsa S. Bhat 	int ret;
14809591becbSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1481cedb70afSSrivatsa S. Bhat 
1482cedb70afSSrivatsa S. Bhat 	if (!policy) {
1483cedb70afSSrivatsa S. Bhat 		pr_debug("%s: No cpu_data found\n", __func__);
1484cedb70afSSrivatsa S. Bhat 		return -EINVAL;
1485cedb70afSSrivatsa S. Bhat 	}
1486cedb70afSSrivatsa S. Bhat 
14879591becbSViresh Kumar 	/* Only proceed for inactive policies */
14889591becbSViresh Kumar 	if (!policy_is_inactive(policy))
148987549141SViresh Kumar 		return 0;
149087549141SViresh Kumar 
149187549141SViresh Kumar 	/* If cpu is last user of policy, free policy */
149287549141SViresh Kumar 	if (has_target()) {
149387549141SViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
149487549141SViresh Kumar 		if (ret) {
149587549141SViresh Kumar 			pr_err("%s: Failed to exit governor\n", __func__);
14963de9bdebSViresh Kumar 			return ret;
14973de9bdebSViresh Kumar 		}
14983de9bdebSViresh Kumar 	}
14992a998599SRafael J. Wysocki 
15008414809cSSrivatsa S. Bhat 	/*
15018414809cSSrivatsa S. Bhat 	 * Perform the ->exit() even during light-weight tear-down,
15028414809cSSrivatsa S. Bhat 	 * since this is a core component, and is essential for the
15038414809cSSrivatsa S. Bhat 	 * subsequent light-weight ->init() to succeed.
15048414809cSSrivatsa S. Bhat 	 */
15051c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->exit)
15063a3e9e06SViresh Kumar 		cpufreq_driver->exit(policy);
150727ecddc2SJacob Shin 
15083654c5ccSViresh Kumar 	/* Free the policy only if the driver is getting removed. */
150987549141SViresh Kumar 	if (sif)
15103654c5ccSViresh Kumar 		cpufreq_policy_free(policy, true);
15111da177e4SLinus Torvalds 
15121da177e4SLinus Torvalds 	return 0;
15131da177e4SLinus Torvalds }
15141da177e4SLinus Torvalds 
1515cedb70afSSrivatsa S. Bhat /**
151627a862e9SViresh Kumar  * cpufreq_remove_dev - remove a CPU device
1517cedb70afSSrivatsa S. Bhat  *
1518cedb70afSSrivatsa S. Bhat  * Removes the cpufreq interface for a CPU device.
1519cedb70afSSrivatsa S. Bhat  */
15208a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
15215a01f2e8SVenkatesh Pallipadi {
15228a25a2fdSKay Sievers 	unsigned int cpu = dev->id;
152327a862e9SViresh Kumar 	int ret;
1524ec28297aSVenki Pallipadi 
152587549141SViresh Kumar 	/*
152687549141SViresh Kumar 	 * Only possible if 'cpu' is getting physically removed now. A hotplug
152787549141SViresh Kumar 	 * notifier should have already been called and we just need to remove
152887549141SViresh Kumar 	 * link or free policy here.
152987549141SViresh Kumar 	 */
153087549141SViresh Kumar 	if (cpu_is_offline(cpu)) {
153187549141SViresh Kumar 		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
153287549141SViresh Kumar 		struct cpumask mask;
153387549141SViresh Kumar 
153487549141SViresh Kumar 		if (!policy)
1535ec28297aSVenki Pallipadi 			return 0;
1536ec28297aSVenki Pallipadi 
153787549141SViresh Kumar 		cpumask_copy(&mask, policy->related_cpus);
153887549141SViresh Kumar 		cpumask_clear_cpu(cpu, &mask);
153987549141SViresh Kumar 
154087549141SViresh Kumar 		/*
154187549141SViresh Kumar 		 * Free policy only if all policy->related_cpus are removed
154287549141SViresh Kumar 		 * physically.
154387549141SViresh Kumar 		 */
154487549141SViresh Kumar 		if (cpumask_intersects(&mask, cpu_present_mask)) {
154587549141SViresh Kumar 			remove_cpu_dev_symlink(policy, cpu);
154687549141SViresh Kumar 			return 0;
154787549141SViresh Kumar 		}
154887549141SViresh Kumar 
15493654c5ccSViresh Kumar 		cpufreq_policy_free(policy, true);
155087549141SViresh Kumar 		return 0;
155187549141SViresh Kumar 	}
155287549141SViresh Kumar 
155396bbbe4aSViresh Kumar 	ret = __cpufreq_remove_dev_prepare(dev, sif);
155427a862e9SViresh Kumar 
155527a862e9SViresh Kumar 	if (!ret)
155696bbbe4aSViresh Kumar 		ret = __cpufreq_remove_dev_finish(dev, sif);
155727a862e9SViresh Kumar 
155827a862e9SViresh Kumar 	return ret;
15595a01f2e8SVenkatesh Pallipadi }
15605a01f2e8SVenkatesh Pallipadi 
156165f27f38SDavid Howells static void handle_update(struct work_struct *work)
15621da177e4SLinus Torvalds {
156365f27f38SDavid Howells 	struct cpufreq_policy *policy =
156465f27f38SDavid Howells 		container_of(work, struct cpufreq_policy, update);
156565f27f38SDavid Howells 	unsigned int cpu = policy->cpu;
15662d06d8c4SDominik Brodowski 	pr_debug("handle_update for cpu %u called\n", cpu);
15671da177e4SLinus Torvalds 	cpufreq_update_policy(cpu);
15681da177e4SLinus Torvalds }
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds /**
1571bb176f7dSViresh Kumar  *	cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1572bb176f7dSViresh Kumar  *	in deep trouble.
1573a1e1dc41SViresh Kumar  *	@policy: policy managing CPUs
15741da177e4SLinus Torvalds  *	@new_freq: CPU frequency the CPU actually runs at
15751da177e4SLinus Torvalds  *
157629464f28SDave Jones  *	We adjust to current frequency first, and need to clean up later.
157729464f28SDave Jones  *	So either call to cpufreq_update_policy() or schedule handle_update()).
15781da177e4SLinus Torvalds  */
1579a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1580e08f5f5bSGautham R Shenoy 				unsigned int new_freq)
15811da177e4SLinus Torvalds {
15821da177e4SLinus Torvalds 	struct cpufreq_freqs freqs;
1583b43a7ffbSViresh Kumar 
1584e837f9b5SJoe Perches 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1585a1e1dc41SViresh Kumar 		 policy->cur, new_freq);
15861da177e4SLinus Torvalds 
1587a1e1dc41SViresh Kumar 	freqs.old = policy->cur;
15881da177e4SLinus Torvalds 	freqs.new = new_freq;
1589b43a7ffbSViresh Kumar 
15908fec051eSViresh Kumar 	cpufreq_freq_transition_begin(policy, &freqs);
15918fec051eSViresh Kumar 	cpufreq_freq_transition_end(policy, &freqs, 0);
15921da177e4SLinus Torvalds }
15931da177e4SLinus Torvalds 
15941da177e4SLinus Torvalds /**
15954ab70df4SDhaval Giani  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
159695235ca2SVenkatesh Pallipadi  * @cpu: CPU number
159795235ca2SVenkatesh Pallipadi  *
159895235ca2SVenkatesh Pallipadi  * This is the last known freq, without actually getting it from the driver.
159995235ca2SVenkatesh Pallipadi  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
160095235ca2SVenkatesh Pallipadi  */
160195235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu)
160295235ca2SVenkatesh Pallipadi {
16039e21ba8bSDirk Brandewie 	struct cpufreq_policy *policy;
1604e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
160595235ca2SVenkatesh Pallipadi 
16061c3d85ddSRafael J. Wysocki 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
16071c3d85ddSRafael J. Wysocki 		return cpufreq_driver->get(cpu);
16089e21ba8bSDirk Brandewie 
16099e21ba8bSDirk Brandewie 	policy = cpufreq_cpu_get(cpu);
161095235ca2SVenkatesh Pallipadi 	if (policy) {
1611e08f5f5bSGautham R Shenoy 		ret_freq = policy->cur;
161295235ca2SVenkatesh Pallipadi 		cpufreq_cpu_put(policy);
161395235ca2SVenkatesh Pallipadi 	}
161495235ca2SVenkatesh Pallipadi 
16154d34a67dSDave Jones 	return ret_freq;
161695235ca2SVenkatesh Pallipadi }
161795235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get);
161895235ca2SVenkatesh Pallipadi 
16193d737108SJesse Barnes /**
16203d737108SJesse Barnes  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
16213d737108SJesse Barnes  * @cpu: CPU number
16223d737108SJesse Barnes  *
16233d737108SJesse Barnes  * Just return the max possible frequency for a given CPU.
16243d737108SJesse Barnes  */
16253d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu)
16263d737108SJesse Barnes {
16273d737108SJesse Barnes 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16283d737108SJesse Barnes 	unsigned int ret_freq = 0;
16293d737108SJesse Barnes 
16303d737108SJesse Barnes 	if (policy) {
16313d737108SJesse Barnes 		ret_freq = policy->max;
16323d737108SJesse Barnes 		cpufreq_cpu_put(policy);
16333d737108SJesse Barnes 	}
16343d737108SJesse Barnes 
16353d737108SJesse Barnes 	return ret_freq;
16363d737108SJesse Barnes }
16373d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max);
16383d737108SJesse Barnes 
1639d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
16401da177e4SLinus Torvalds {
1641e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
16421da177e4SLinus Torvalds 
16431c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver->get)
16444d34a67dSDave Jones 		return ret_freq;
16451da177e4SLinus Torvalds 
1646d92d50a4SViresh Kumar 	ret_freq = cpufreq_driver->get(policy->cpu);
16471da177e4SLinus Torvalds 
164811e584cfSViresh Kumar 	/* Updating inactive policies is invalid, so avoid doing that. */
164911e584cfSViresh Kumar 	if (unlikely(policy_is_inactive(policy)))
165011e584cfSViresh Kumar 		return ret_freq;
165111e584cfSViresh Kumar 
1652e08f5f5bSGautham R Shenoy 	if (ret_freq && policy->cur &&
16531c3d85ddSRafael J. Wysocki 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1654e08f5f5bSGautham R Shenoy 		/* verify no discrepancy between actual and
1655e08f5f5bSGautham R Shenoy 					saved value exists */
1656e08f5f5bSGautham R Shenoy 		if (unlikely(ret_freq != policy->cur)) {
1657a1e1dc41SViresh Kumar 			cpufreq_out_of_sync(policy, ret_freq);
16581da177e4SLinus Torvalds 			schedule_work(&policy->update);
16591da177e4SLinus Torvalds 		}
16601da177e4SLinus Torvalds 	}
16611da177e4SLinus Torvalds 
16624d34a67dSDave Jones 	return ret_freq;
16635a01f2e8SVenkatesh Pallipadi }
16641da177e4SLinus Torvalds 
16655a01f2e8SVenkatesh Pallipadi /**
16665a01f2e8SVenkatesh Pallipadi  * cpufreq_get - get the current CPU frequency (in kHz)
16675a01f2e8SVenkatesh Pallipadi  * @cpu: CPU number
16685a01f2e8SVenkatesh Pallipadi  *
16695a01f2e8SVenkatesh Pallipadi  * Get the CPU current (static) CPU frequency
16705a01f2e8SVenkatesh Pallipadi  */
16715a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu)
16725a01f2e8SVenkatesh Pallipadi {
1673999976e0SAaron Plattner 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16745a01f2e8SVenkatesh Pallipadi 	unsigned int ret_freq = 0;
16755a01f2e8SVenkatesh Pallipadi 
1676999976e0SAaron Plattner 	if (policy) {
1677ad7722daSviresh kumar 		down_read(&policy->rwsem);
1678d92d50a4SViresh Kumar 		ret_freq = __cpufreq_get(policy);
1679ad7722daSviresh kumar 		up_read(&policy->rwsem);
1680999976e0SAaron Plattner 
1681999976e0SAaron Plattner 		cpufreq_cpu_put(policy);
1682999976e0SAaron Plattner 	}
16836eed9404SViresh Kumar 
16844d34a67dSDave Jones 	return ret_freq;
16851da177e4SLinus Torvalds }
16861da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get);
16871da177e4SLinus Torvalds 
16888a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = {
16898a25a2fdSKay Sievers 	.name		= "cpufreq",
16908a25a2fdSKay Sievers 	.subsys		= &cpu_subsys,
16918a25a2fdSKay Sievers 	.add_dev	= cpufreq_add_dev,
16928a25a2fdSKay Sievers 	.remove_dev	= cpufreq_remove_dev,
1693e00e56dfSRafael J. Wysocki };
1694e00e56dfSRafael J. Wysocki 
1695e28867eaSViresh Kumar /*
1696e28867eaSViresh Kumar  * In case platform wants some specific frequency to be configured
1697e28867eaSViresh Kumar  * during suspend..
169842d4dc3fSBenjamin Herrenschmidt  */
1699e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy)
170042d4dc3fSBenjamin Herrenschmidt {
1701e28867eaSViresh Kumar 	int ret;
17024bc5d341SDave Jones 
1703e28867eaSViresh Kumar 	if (!policy->suspend_freq) {
1704e28867eaSViresh Kumar 		pr_err("%s: suspend_freq can't be zero\n", __func__);
1705e28867eaSViresh Kumar 		return -EINVAL;
170642d4dc3fSBenjamin Herrenschmidt 	}
170742d4dc3fSBenjamin Herrenschmidt 
1708e28867eaSViresh Kumar 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1709e28867eaSViresh Kumar 			policy->suspend_freq);
1710e28867eaSViresh Kumar 
1711e28867eaSViresh Kumar 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1712e28867eaSViresh Kumar 			CPUFREQ_RELATION_H);
1713e28867eaSViresh Kumar 	if (ret)
1714e28867eaSViresh Kumar 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1715e28867eaSViresh Kumar 				__func__, policy->suspend_freq, ret);
1716e28867eaSViresh Kumar 
1717c9060494SDave Jones 	return ret;
171842d4dc3fSBenjamin Herrenschmidt }
1719e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend);
172042d4dc3fSBenjamin Herrenschmidt 
172142d4dc3fSBenjamin Herrenschmidt /**
17222f0aea93SViresh Kumar  * cpufreq_suspend() - Suspend CPUFreq governors
17231da177e4SLinus Torvalds  *
17242f0aea93SViresh Kumar  * Called during system wide Suspend/Hibernate cycles for suspending governors
17252f0aea93SViresh Kumar  * as some platforms can't change frequency after this point in suspend cycle.
17262f0aea93SViresh Kumar  * Because some of the devices (like: i2c, regulators, etc) they use for
17272f0aea93SViresh Kumar  * changing frequency are suspended quickly after this point.
17281da177e4SLinus Torvalds  */
17292f0aea93SViresh Kumar void cpufreq_suspend(void)
17301da177e4SLinus Torvalds {
17313a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
17321da177e4SLinus Torvalds 
17332f0aea93SViresh Kumar 	if (!cpufreq_driver)
1734e00e56dfSRafael J. Wysocki 		return;
17351da177e4SLinus Torvalds 
17362f0aea93SViresh Kumar 	if (!has_target())
1737b1b12babSViresh Kumar 		goto suspend;
17381da177e4SLinus Torvalds 
17392f0aea93SViresh Kumar 	pr_debug("%s: Suspending Governors\n", __func__);
17402f0aea93SViresh Kumar 
1741f963735aSViresh Kumar 	for_each_active_policy(policy) {
17422f0aea93SViresh Kumar 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
17432f0aea93SViresh Kumar 			pr_err("%s: Failed to stop governor for policy: %p\n",
17442f0aea93SViresh Kumar 				__func__, policy);
17452f0aea93SViresh Kumar 		else if (cpufreq_driver->suspend
17462f0aea93SViresh Kumar 		    && cpufreq_driver->suspend(policy))
17472f0aea93SViresh Kumar 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
17482f0aea93SViresh Kumar 				policy);
17491da177e4SLinus Torvalds 	}
1750b1b12babSViresh Kumar 
1751b1b12babSViresh Kumar suspend:
1752b1b12babSViresh Kumar 	cpufreq_suspended = true;
17531da177e4SLinus Torvalds }
17541da177e4SLinus Torvalds 
17551da177e4SLinus Torvalds /**
17562f0aea93SViresh Kumar  * cpufreq_resume() - Resume CPUFreq governors
17571da177e4SLinus Torvalds  *
17582f0aea93SViresh Kumar  * Called during system wide Suspend/Hibernate cycle for resuming governors that
17592f0aea93SViresh Kumar  * are suspended with cpufreq_suspend().
17601da177e4SLinus Torvalds  */
17612f0aea93SViresh Kumar void cpufreq_resume(void)
17621da177e4SLinus Torvalds {
17631da177e4SLinus Torvalds 	struct cpufreq_policy *policy;
17641da177e4SLinus Torvalds 
17652f0aea93SViresh Kumar 	if (!cpufreq_driver)
17661da177e4SLinus Torvalds 		return;
17671da177e4SLinus Torvalds 
17688e30444eSLan Tianyu 	cpufreq_suspended = false;
17698e30444eSLan Tianyu 
17702f0aea93SViresh Kumar 	if (!has_target())
17712f0aea93SViresh Kumar 		return;
17721da177e4SLinus Torvalds 
17732f0aea93SViresh Kumar 	pr_debug("%s: Resuming Governors\n", __func__);
17742f0aea93SViresh Kumar 
1775f963735aSViresh Kumar 	for_each_active_policy(policy) {
17760c5aa405SViresh Kumar 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
17770c5aa405SViresh Kumar 			pr_err("%s: Failed to resume driver: %p\n", __func__,
17780c5aa405SViresh Kumar 				policy);
17790c5aa405SViresh Kumar 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
17802f0aea93SViresh Kumar 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
17812f0aea93SViresh Kumar 			pr_err("%s: Failed to start governor for policy: %p\n",
17822f0aea93SViresh Kumar 				__func__, policy);
1783c75de0acSViresh Kumar 	}
17842f0aea93SViresh Kumar 
17852f0aea93SViresh Kumar 	/*
1786c75de0acSViresh Kumar 	 * schedule call cpufreq_update_policy() for first-online CPU, as that
1787c75de0acSViresh Kumar 	 * wouldn't be hotplugged-out on suspend. It will verify that the
1788c75de0acSViresh Kumar 	 * current freq is in sync with what we believe it to be.
17892f0aea93SViresh Kumar 	 */
1790c75de0acSViresh Kumar 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1791c75de0acSViresh Kumar 	if (WARN_ON(!policy))
1792c75de0acSViresh Kumar 		return;
1793c75de0acSViresh Kumar 
17943a3e9e06SViresh Kumar 	schedule_work(&policy->update);
17951da177e4SLinus Torvalds }
17961da177e4SLinus Torvalds 
17979d95046eSBorislav Petkov /**
17989d95046eSBorislav Petkov  *	cpufreq_get_current_driver - return current driver's name
17999d95046eSBorislav Petkov  *
18009d95046eSBorislav Petkov  *	Return the name string of the currently loaded cpufreq driver
18019d95046eSBorislav Petkov  *	or NULL, if none.
18029d95046eSBorislav Petkov  */
18039d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void)
18049d95046eSBorislav Petkov {
18051c3d85ddSRafael J. Wysocki 	if (cpufreq_driver)
18061c3d85ddSRafael J. Wysocki 		return cpufreq_driver->name;
18071c3d85ddSRafael J. Wysocki 
18081c3d85ddSRafael J. Wysocki 	return NULL;
18099d95046eSBorislav Petkov }
18109d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
18111da177e4SLinus Torvalds 
181251315cdfSThomas Petazzoni /**
181351315cdfSThomas Petazzoni  *	cpufreq_get_driver_data - return current driver data
181451315cdfSThomas Petazzoni  *
181551315cdfSThomas Petazzoni  *	Return the private data of the currently loaded cpufreq
181651315cdfSThomas Petazzoni  *	driver, or NULL if no cpufreq driver is loaded.
181751315cdfSThomas Petazzoni  */
181851315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void)
181951315cdfSThomas Petazzoni {
182051315cdfSThomas Petazzoni 	if (cpufreq_driver)
182151315cdfSThomas Petazzoni 		return cpufreq_driver->driver_data;
182251315cdfSThomas Petazzoni 
182351315cdfSThomas Petazzoni 	return NULL;
182451315cdfSThomas Petazzoni }
182551315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
182651315cdfSThomas Petazzoni 
18271da177e4SLinus Torvalds /*********************************************************************
18281da177e4SLinus Torvalds  *                     NOTIFIER LISTS INTERFACE                      *
18291da177e4SLinus Torvalds  *********************************************************************/
18301da177e4SLinus Torvalds 
18311da177e4SLinus Torvalds /**
18321da177e4SLinus Torvalds  *	cpufreq_register_notifier - register a driver with cpufreq
18331da177e4SLinus Torvalds  *	@nb: notifier function to register
18341da177e4SLinus Torvalds  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18351da177e4SLinus Torvalds  *
18361da177e4SLinus Torvalds  *	Add a driver to one of two lists: either a list of drivers that
18371da177e4SLinus Torvalds  *      are notified about clock rate changes (once before and once after
18381da177e4SLinus Torvalds  *      the transition), or a list of drivers that are notified about
18391da177e4SLinus Torvalds  *      changes in cpufreq policy.
18401da177e4SLinus Torvalds  *
18411da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1842e041c683SAlan Stern  *	blocking_notifier_chain_register.
18431da177e4SLinus Torvalds  */
18441da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
18451da177e4SLinus Torvalds {
18461da177e4SLinus Torvalds 	int ret;
18471da177e4SLinus Torvalds 
1848d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1849d5aaffa9SDirk Brandewie 		return -EINVAL;
1850d5aaffa9SDirk Brandewie 
185174212ca4SCesar Eduardo Barros 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
185274212ca4SCesar Eduardo Barros 
18531da177e4SLinus Torvalds 	switch (list) {
18541da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1855b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_register(
1856e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18571da177e4SLinus Torvalds 		break;
18581da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1859e041c683SAlan Stern 		ret = blocking_notifier_chain_register(
1860e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18611da177e4SLinus Torvalds 		break;
18621da177e4SLinus Torvalds 	default:
18631da177e4SLinus Torvalds 		ret = -EINVAL;
18641da177e4SLinus Torvalds 	}
18651da177e4SLinus Torvalds 
18661da177e4SLinus Torvalds 	return ret;
18671da177e4SLinus Torvalds }
18681da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier);
18691da177e4SLinus Torvalds 
18701da177e4SLinus Torvalds /**
18711da177e4SLinus Torvalds  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
18721da177e4SLinus Torvalds  *	@nb: notifier block to be unregistered
18731da177e4SLinus Torvalds  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18741da177e4SLinus Torvalds  *
18751da177e4SLinus Torvalds  *	Remove a driver from the CPU frequency notifier list.
18761da177e4SLinus Torvalds  *
18771da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1878e041c683SAlan Stern  *	blocking_notifier_chain_unregister.
18791da177e4SLinus Torvalds  */
18801da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
18811da177e4SLinus Torvalds {
18821da177e4SLinus Torvalds 	int ret;
18831da177e4SLinus Torvalds 
1884d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1885d5aaffa9SDirk Brandewie 		return -EINVAL;
1886d5aaffa9SDirk Brandewie 
18871da177e4SLinus Torvalds 	switch (list) {
18881da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1889b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_unregister(
1890e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18911da177e4SLinus Torvalds 		break;
18921da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1893e041c683SAlan Stern 		ret = blocking_notifier_chain_unregister(
1894e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18951da177e4SLinus Torvalds 		break;
18961da177e4SLinus Torvalds 	default:
18971da177e4SLinus Torvalds 		ret = -EINVAL;
18981da177e4SLinus Torvalds 	}
18991da177e4SLinus Torvalds 
19001da177e4SLinus Torvalds 	return ret;
19011da177e4SLinus Torvalds }
19021da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier);
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds 
19051da177e4SLinus Torvalds /*********************************************************************
19061da177e4SLinus Torvalds  *                              GOVERNORS                            *
19071da177e4SLinus Torvalds  *********************************************************************/
19081da177e4SLinus Torvalds 
19091c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */
19101c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy,
19111c03a2d0SViresh Kumar 				 struct cpufreq_freqs *freqs, int index)
19121c03a2d0SViresh Kumar {
19131c03a2d0SViresh Kumar 	int ret;
19141c03a2d0SViresh Kumar 
19151c03a2d0SViresh Kumar 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
19161c03a2d0SViresh Kumar 
19171c03a2d0SViresh Kumar 	/* We don't need to switch to intermediate freq */
19181c03a2d0SViresh Kumar 	if (!freqs->new)
19191c03a2d0SViresh Kumar 		return 0;
19201c03a2d0SViresh Kumar 
19211c03a2d0SViresh Kumar 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
19221c03a2d0SViresh Kumar 		 __func__, policy->cpu, freqs->old, freqs->new);
19231c03a2d0SViresh Kumar 
19241c03a2d0SViresh Kumar 	cpufreq_freq_transition_begin(policy, freqs);
19251c03a2d0SViresh Kumar 	ret = cpufreq_driver->target_intermediate(policy, index);
19261c03a2d0SViresh Kumar 	cpufreq_freq_transition_end(policy, freqs, ret);
19271c03a2d0SViresh Kumar 
19281c03a2d0SViresh Kumar 	if (ret)
19291c03a2d0SViresh Kumar 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
19301c03a2d0SViresh Kumar 		       __func__, ret);
19311c03a2d0SViresh Kumar 
19321c03a2d0SViresh Kumar 	return ret;
19331c03a2d0SViresh Kumar }
19341c03a2d0SViresh Kumar 
19358d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy,
19368d65775dSViresh Kumar 			  struct cpufreq_frequency_table *freq_table, int index)
19378d65775dSViresh Kumar {
19381c03a2d0SViresh Kumar 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
19391c03a2d0SViresh Kumar 	unsigned int intermediate_freq = 0;
19408d65775dSViresh Kumar 	int retval = -EINVAL;
19418d65775dSViresh Kumar 	bool notify;
19428d65775dSViresh Kumar 
19438d65775dSViresh Kumar 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
19448d65775dSViresh Kumar 	if (notify) {
19451c03a2d0SViresh Kumar 		/* Handle switching to intermediate frequency */
19461c03a2d0SViresh Kumar 		if (cpufreq_driver->get_intermediate) {
19471c03a2d0SViresh Kumar 			retval = __target_intermediate(policy, &freqs, index);
19481c03a2d0SViresh Kumar 			if (retval)
19491c03a2d0SViresh Kumar 				return retval;
19508d65775dSViresh Kumar 
19511c03a2d0SViresh Kumar 			intermediate_freq = freqs.new;
19521c03a2d0SViresh Kumar 			/* Set old freq to intermediate */
19531c03a2d0SViresh Kumar 			if (intermediate_freq)
19541c03a2d0SViresh Kumar 				freqs.old = freqs.new;
19551c03a2d0SViresh Kumar 		}
19561c03a2d0SViresh Kumar 
19571c03a2d0SViresh Kumar 		freqs.new = freq_table[index].frequency;
19588d65775dSViresh Kumar 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
19598d65775dSViresh Kumar 			 __func__, policy->cpu, freqs.old, freqs.new);
19608d65775dSViresh Kumar 
19618d65775dSViresh Kumar 		cpufreq_freq_transition_begin(policy, &freqs);
19628d65775dSViresh Kumar 	}
19638d65775dSViresh Kumar 
19648d65775dSViresh Kumar 	retval = cpufreq_driver->target_index(policy, index);
19658d65775dSViresh Kumar 	if (retval)
19668d65775dSViresh Kumar 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
19678d65775dSViresh Kumar 		       retval);
19688d65775dSViresh Kumar 
19691c03a2d0SViresh Kumar 	if (notify) {
19708d65775dSViresh Kumar 		cpufreq_freq_transition_end(policy, &freqs, retval);
19718d65775dSViresh Kumar 
19721c03a2d0SViresh Kumar 		/*
19731c03a2d0SViresh Kumar 		 * Failed after setting to intermediate freq? Driver should have
19741c03a2d0SViresh Kumar 		 * reverted back to initial frequency and so should we. Check
19751c03a2d0SViresh Kumar 		 * here for intermediate_freq instead of get_intermediate, in
197658405af6SShailendra Verma 		 * case we haven't switched to intermediate freq at all.
19771c03a2d0SViresh Kumar 		 */
19781c03a2d0SViresh Kumar 		if (unlikely(retval && intermediate_freq)) {
19791c03a2d0SViresh Kumar 			freqs.old = intermediate_freq;
19801c03a2d0SViresh Kumar 			freqs.new = policy->restore_freq;
19811c03a2d0SViresh Kumar 			cpufreq_freq_transition_begin(policy, &freqs);
19821c03a2d0SViresh Kumar 			cpufreq_freq_transition_end(policy, &freqs, 0);
19831c03a2d0SViresh Kumar 		}
19841c03a2d0SViresh Kumar 	}
19851c03a2d0SViresh Kumar 
19868d65775dSViresh Kumar 	return retval;
19878d65775dSViresh Kumar }
19888d65775dSViresh Kumar 
19891da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy,
19901da177e4SLinus Torvalds 			    unsigned int target_freq,
19911da177e4SLinus Torvalds 			    unsigned int relation)
19921da177e4SLinus Torvalds {
19937249924eSViresh Kumar 	unsigned int old_target_freq = target_freq;
19948d65775dSViresh Kumar 	int retval = -EINVAL;
1995c32b6b8eSAshok Raj 
1996a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
1997a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
1998a7b422cdSKonrad Rzeszutek Wilk 
19997249924eSViresh Kumar 	/* Make sure that target_freq is within supported range */
20007249924eSViresh Kumar 	if (target_freq > policy->max)
20017249924eSViresh Kumar 		target_freq = policy->max;
20027249924eSViresh Kumar 	if (target_freq < policy->min)
20037249924eSViresh Kumar 		target_freq = policy->min;
20047249924eSViresh Kumar 
20057249924eSViresh Kumar 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
20067249924eSViresh Kumar 		 policy->cpu, target_freq, relation, old_target_freq);
20075a1c0228SViresh Kumar 
20089c0ebcf7SViresh Kumar 	/*
20099c0ebcf7SViresh Kumar 	 * This might look like a redundant call as we are checking it again
20109c0ebcf7SViresh Kumar 	 * after finding index. But it is left intentionally for cases where
20119c0ebcf7SViresh Kumar 	 * exactly same freq is called again and so we can save on few function
20129c0ebcf7SViresh Kumar 	 * calls.
20139c0ebcf7SViresh Kumar 	 */
20145a1c0228SViresh Kumar 	if (target_freq == policy->cur)
20155a1c0228SViresh Kumar 		return 0;
20165a1c0228SViresh Kumar 
20171c03a2d0SViresh Kumar 	/* Save last value to restore later on errors */
20181c03a2d0SViresh Kumar 	policy->restore_freq = policy->cur;
20191c03a2d0SViresh Kumar 
20201c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->target)
20211c3d85ddSRafael J. Wysocki 		retval = cpufreq_driver->target(policy, target_freq, relation);
20229c0ebcf7SViresh Kumar 	else if (cpufreq_driver->target_index) {
20239c0ebcf7SViresh Kumar 		struct cpufreq_frequency_table *freq_table;
20249c0ebcf7SViresh Kumar 		int index;
202590d45d17SAshok Raj 
20269c0ebcf7SViresh Kumar 		freq_table = cpufreq_frequency_get_table(policy->cpu);
20279c0ebcf7SViresh Kumar 		if (unlikely(!freq_table)) {
20289c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find freq_table\n", __func__);
20299c0ebcf7SViresh Kumar 			goto out;
20309c0ebcf7SViresh Kumar 		}
20319c0ebcf7SViresh Kumar 
20329c0ebcf7SViresh Kumar 		retval = cpufreq_frequency_table_target(policy, freq_table,
20339c0ebcf7SViresh Kumar 				target_freq, relation, &index);
20349c0ebcf7SViresh Kumar 		if (unlikely(retval)) {
20359c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find matching freq\n", __func__);
20369c0ebcf7SViresh Kumar 			goto out;
20379c0ebcf7SViresh Kumar 		}
20389c0ebcf7SViresh Kumar 
2039d4019f0aSViresh Kumar 		if (freq_table[index].frequency == policy->cur) {
20409c0ebcf7SViresh Kumar 			retval = 0;
2041d4019f0aSViresh Kumar 			goto out;
2042d4019f0aSViresh Kumar 		}
2043d4019f0aSViresh Kumar 
20448d65775dSViresh Kumar 		retval = __target_index(policy, freq_table, index);
20459c0ebcf7SViresh Kumar 	}
20469c0ebcf7SViresh Kumar 
20479c0ebcf7SViresh Kumar out:
20481da177e4SLinus Torvalds 	return retval;
20491da177e4SLinus Torvalds }
20501da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
20511da177e4SLinus Torvalds 
20521da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy,
20531da177e4SLinus Torvalds 			  unsigned int target_freq,
20541da177e4SLinus Torvalds 			  unsigned int relation)
20551da177e4SLinus Torvalds {
2056f1829e4aSJulia Lawall 	int ret = -EINVAL;
20571da177e4SLinus Torvalds 
2058ad7722daSviresh kumar 	down_write(&policy->rwsem);
20591da177e4SLinus Torvalds 
20601da177e4SLinus Torvalds 	ret = __cpufreq_driver_target(policy, target_freq, relation);
20611da177e4SLinus Torvalds 
2062ad7722daSviresh kumar 	up_write(&policy->rwsem);
20631da177e4SLinus Torvalds 
20641da177e4SLinus Torvalds 	return ret;
20651da177e4SLinus Torvalds }
20661da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target);
20671da177e4SLinus Torvalds 
2068e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy,
2069e08f5f5bSGautham R Shenoy 					unsigned int event)
20701da177e4SLinus Torvalds {
2071cc993cabSDave Jones 	int ret;
20726afde10cSThomas Renninger 
20736afde10cSThomas Renninger 	/* Only must be defined when default governor is known to have latency
20746afde10cSThomas Renninger 	   restrictions, like e.g. conservative or ondemand.
20756afde10cSThomas Renninger 	   That this is the case is already ensured in Kconfig
20766afde10cSThomas Renninger 	*/
20776afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
20786afde10cSThomas Renninger 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
20796afde10cSThomas Renninger #else
20806afde10cSThomas Renninger 	struct cpufreq_governor *gov = NULL;
20816afde10cSThomas Renninger #endif
20821c256245SThomas Renninger 
20832f0aea93SViresh Kumar 	/* Don't start any governor operations if we are entering suspend */
20842f0aea93SViresh Kumar 	if (cpufreq_suspended)
20852f0aea93SViresh Kumar 		return 0;
2086cb57720bSEthan Zhao 	/*
2087cb57720bSEthan Zhao 	 * Governor might not be initiated here if ACPI _PPC changed
2088cb57720bSEthan Zhao 	 * notification happened, so check it.
2089cb57720bSEthan Zhao 	 */
2090cb57720bSEthan Zhao 	if (!policy->governor)
2091cb57720bSEthan Zhao 		return -EINVAL;
20922f0aea93SViresh Kumar 
20931c256245SThomas Renninger 	if (policy->governor->max_transition_latency &&
20941c256245SThomas Renninger 	    policy->cpuinfo.transition_latency >
20951c256245SThomas Renninger 	    policy->governor->max_transition_latency) {
20966afde10cSThomas Renninger 		if (!gov)
20976afde10cSThomas Renninger 			return -EINVAL;
20986afde10cSThomas Renninger 		else {
2099e837f9b5SJoe Perches 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2100e837f9b5SJoe Perches 				policy->governor->name, gov->name);
21011c256245SThomas Renninger 			policy->governor = gov;
21021c256245SThomas Renninger 		}
21036afde10cSThomas Renninger 	}
21041da177e4SLinus Torvalds 
2105fe492f3fSViresh Kumar 	if (event == CPUFREQ_GOV_POLICY_INIT)
21061da177e4SLinus Torvalds 		if (!try_module_get(policy->governor->owner))
21071da177e4SLinus Torvalds 			return -EINVAL;
21081da177e4SLinus Torvalds 
21092d06d8c4SDominik Brodowski 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2110e08f5f5bSGautham R Shenoy 		 policy->cpu, event);
211195731ebbSXiaoguang Chen 
211295731ebbSXiaoguang Chen 	mutex_lock(&cpufreq_governor_lock);
211356d07db2SSrivatsa S. Bhat 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2114f73d3933SViresh Kumar 	    || (!policy->governor_enabled
2115f73d3933SViresh Kumar 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
211695731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
211795731ebbSXiaoguang Chen 		return -EBUSY;
211895731ebbSXiaoguang Chen 	}
211995731ebbSXiaoguang Chen 
212095731ebbSXiaoguang Chen 	if (event == CPUFREQ_GOV_STOP)
212195731ebbSXiaoguang Chen 		policy->governor_enabled = false;
212295731ebbSXiaoguang Chen 	else if (event == CPUFREQ_GOV_START)
212395731ebbSXiaoguang Chen 		policy->governor_enabled = true;
212495731ebbSXiaoguang Chen 
212595731ebbSXiaoguang Chen 	mutex_unlock(&cpufreq_governor_lock);
212695731ebbSXiaoguang Chen 
21271da177e4SLinus Torvalds 	ret = policy->governor->governor(policy, event);
21281da177e4SLinus Torvalds 
21294d5dcc42SViresh Kumar 	if (!ret) {
21304d5dcc42SViresh Kumar 		if (event == CPUFREQ_GOV_POLICY_INIT)
21318e53695fSViresh Kumar 			policy->governor->initialized++;
21324d5dcc42SViresh Kumar 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
21338e53695fSViresh Kumar 			policy->governor->initialized--;
213495731ebbSXiaoguang Chen 	} else {
213595731ebbSXiaoguang Chen 		/* Restore original values */
213695731ebbSXiaoguang Chen 		mutex_lock(&cpufreq_governor_lock);
213795731ebbSXiaoguang Chen 		if (event == CPUFREQ_GOV_STOP)
213895731ebbSXiaoguang Chen 			policy->governor_enabled = true;
213995731ebbSXiaoguang Chen 		else if (event == CPUFREQ_GOV_START)
214095731ebbSXiaoguang Chen 			policy->governor_enabled = false;
214195731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
21424d5dcc42SViresh Kumar 	}
2143b394058fSViresh Kumar 
2144fe492f3fSViresh Kumar 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2145fe492f3fSViresh Kumar 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
21461da177e4SLinus Torvalds 		module_put(policy->governor->owner);
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds 	return ret;
21491da177e4SLinus Torvalds }
21501da177e4SLinus Torvalds 
21511da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor)
21521da177e4SLinus Torvalds {
21533bcb09a3SJeremy Fitzhardinge 	int err;
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds 	if (!governor)
21561da177e4SLinus Torvalds 		return -EINVAL;
21571da177e4SLinus Torvalds 
2158a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2159a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2160a7b422cdSKonrad Rzeszutek Wilk 
21613fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21621da177e4SLinus Torvalds 
2163b394058fSViresh Kumar 	governor->initialized = 0;
21643bcb09a3SJeremy Fitzhardinge 	err = -EBUSY;
216542f91fa1SViresh Kumar 	if (!find_governor(governor->name)) {
21663bcb09a3SJeremy Fitzhardinge 		err = 0;
21671da177e4SLinus Torvalds 		list_add(&governor->governor_list, &cpufreq_governor_list);
21683bcb09a3SJeremy Fitzhardinge 	}
21691da177e4SLinus Torvalds 
21703fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21713bcb09a3SJeremy Fitzhardinge 	return err;
21721da177e4SLinus Torvalds }
21731da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor);
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor)
21761da177e4SLinus Torvalds {
21774573237bSViresh Kumar 	struct cpufreq_policy *policy;
21784573237bSViresh Kumar 	unsigned long flags;
217990e41bacSPrarit Bhargava 
21801da177e4SLinus Torvalds 	if (!governor)
21811da177e4SLinus Torvalds 		return;
21821da177e4SLinus Torvalds 
2183a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2184a7b422cdSKonrad Rzeszutek Wilk 		return;
2185a7b422cdSKonrad Rzeszutek Wilk 
21864573237bSViresh Kumar 	/* clear last_governor for all inactive policies */
21874573237bSViresh Kumar 	read_lock_irqsave(&cpufreq_driver_lock, flags);
21884573237bSViresh Kumar 	for_each_inactive_policy(policy) {
218918bf3a12SViresh Kumar 		if (!strcmp(policy->last_governor, governor->name)) {
219018bf3a12SViresh Kumar 			policy->governor = NULL;
21914573237bSViresh Kumar 			strcpy(policy->last_governor, "\0");
219290e41bacSPrarit Bhargava 		}
219318bf3a12SViresh Kumar 	}
21944573237bSViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
219590e41bacSPrarit Bhargava 
21963fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21971da177e4SLinus Torvalds 	list_del(&governor->governor_list);
21983fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21991da177e4SLinus Torvalds 	return;
22001da177e4SLinus Torvalds }
22011da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
22021da177e4SLinus Torvalds 
22031da177e4SLinus Torvalds 
22041da177e4SLinus Torvalds /*********************************************************************
22051da177e4SLinus Torvalds  *                          POLICY INTERFACE                         *
22061da177e4SLinus Torvalds  *********************************************************************/
22071da177e4SLinus Torvalds 
22081da177e4SLinus Torvalds /**
22091da177e4SLinus Torvalds  * cpufreq_get_policy - get the current cpufreq_policy
221029464f28SDave Jones  * @policy: struct cpufreq_policy into which the current cpufreq_policy
221129464f28SDave Jones  *	is written
22121da177e4SLinus Torvalds  *
22131da177e4SLinus Torvalds  * Reads the current cpufreq policy.
22141da177e4SLinus Torvalds  */
22151da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
22161da177e4SLinus Torvalds {
22171da177e4SLinus Torvalds 	struct cpufreq_policy *cpu_policy;
22181da177e4SLinus Torvalds 	if (!policy)
22191da177e4SLinus Torvalds 		return -EINVAL;
22201da177e4SLinus Torvalds 
22211da177e4SLinus Torvalds 	cpu_policy = cpufreq_cpu_get(cpu);
22221da177e4SLinus Torvalds 	if (!cpu_policy)
22231da177e4SLinus Torvalds 		return -EINVAL;
22241da177e4SLinus Torvalds 
2225d5b73cd8SViresh Kumar 	memcpy(policy, cpu_policy, sizeof(*policy));
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 	cpufreq_cpu_put(cpu_policy);
22281da177e4SLinus Torvalds 	return 0;
22291da177e4SLinus Torvalds }
22301da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy);
22311da177e4SLinus Torvalds 
2232153d7f3fSArjan van de Ven /*
2233037ce839SViresh Kumar  * policy : current policy.
2234037ce839SViresh Kumar  * new_policy: policy to be set.
2235153d7f3fSArjan van de Ven  */
2236037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
22373a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy)
22381da177e4SLinus Torvalds {
2239d9a789c7SRafael J. Wysocki 	struct cpufreq_governor *old_gov;
2240d9a789c7SRafael J. Wysocki 	int ret;
22411da177e4SLinus Torvalds 
2242e837f9b5SJoe Perches 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2243e837f9b5SJoe Perches 		 new_policy->cpu, new_policy->min, new_policy->max);
22441da177e4SLinus Torvalds 
2245d5b73cd8SViresh Kumar 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
22461da177e4SLinus Torvalds 
2247d9a789c7SRafael J. Wysocki 	if (new_policy->min > policy->max || new_policy->max < policy->min)
2248d9a789c7SRafael J. Wysocki 		return -EINVAL;
22499c9a43edSMattia Dongili 
22501da177e4SLinus Torvalds 	/* verify the cpu speed can be set within this limit */
22513a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
22521da177e4SLinus Torvalds 	if (ret)
2253d9a789c7SRafael J. Wysocki 		return ret;
22541da177e4SLinus Torvalds 
22551da177e4SLinus Torvalds 	/* adjust if necessary - all reasons */
2256e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22573a3e9e06SViresh Kumar 			CPUFREQ_ADJUST, new_policy);
22581da177e4SLinus Torvalds 
22591da177e4SLinus Torvalds 	/* adjust if necessary - hardware incompatibility*/
2260e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22613a3e9e06SViresh Kumar 			CPUFREQ_INCOMPATIBLE, new_policy);
22621da177e4SLinus Torvalds 
2263bb176f7dSViresh Kumar 	/*
2264bb176f7dSViresh Kumar 	 * verify the cpu speed can be set within this limit, which might be
2265bb176f7dSViresh Kumar 	 * different to the first one
2266bb176f7dSViresh Kumar 	 */
22673a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
2268e041c683SAlan Stern 	if (ret)
2269d9a789c7SRafael J. Wysocki 		return ret;
22701da177e4SLinus Torvalds 
22711da177e4SLinus Torvalds 	/* notification of the new policy */
2272e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22733a3e9e06SViresh Kumar 			CPUFREQ_NOTIFY, new_policy);
22741da177e4SLinus Torvalds 
22753a3e9e06SViresh Kumar 	policy->min = new_policy->min;
22763a3e9e06SViresh Kumar 	policy->max = new_policy->max;
22771da177e4SLinus Torvalds 
22782d06d8c4SDominik Brodowski 	pr_debug("new min and max freqs are %u - %u kHz\n",
22793a3e9e06SViresh Kumar 		 policy->min, policy->max);
22801da177e4SLinus Torvalds 
22811c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
22823a3e9e06SViresh Kumar 		policy->policy = new_policy->policy;
22832d06d8c4SDominik Brodowski 		pr_debug("setting range\n");
2284d9a789c7SRafael J. Wysocki 		return cpufreq_driver->setpolicy(new_policy);
2285d9a789c7SRafael J. Wysocki 	}
2286d9a789c7SRafael J. Wysocki 
2287d9a789c7SRafael J. Wysocki 	if (new_policy->governor == policy->governor)
2288d9a789c7SRafael J. Wysocki 		goto out;
22891da177e4SLinus Torvalds 
22902d06d8c4SDominik Brodowski 	pr_debug("governor switch\n");
22911da177e4SLinus Torvalds 
2292d9a789c7SRafael J. Wysocki 	/* save old, working values */
2293d9a789c7SRafael J. Wysocki 	old_gov = policy->governor;
22941da177e4SLinus Torvalds 	/* end old governor */
2295d9a789c7SRafael J. Wysocki 	if (old_gov) {
22963a3e9e06SViresh Kumar 		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2297ad7722daSviresh kumar 		up_write(&policy->rwsem);
2298d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2299ad7722daSviresh kumar 		down_write(&policy->rwsem);
23007bd353a9SViresh Kumar 	}
23011da177e4SLinus Torvalds 
23021da177e4SLinus Torvalds 	/* start new governor */
23033a3e9e06SViresh Kumar 	policy->governor = new_policy->governor;
23043a3e9e06SViresh Kumar 	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2305d9a789c7SRafael J. Wysocki 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2306d9a789c7SRafael J. Wysocki 			goto out;
2307d9a789c7SRafael J. Wysocki 
2308ad7722daSviresh kumar 		up_write(&policy->rwsem);
2309d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2310ad7722daSviresh kumar 		down_write(&policy->rwsem);
2311955ef483SViresh Kumar 	}
23127bd353a9SViresh Kumar 
23131da177e4SLinus Torvalds 	/* new governor failed, so re-start old one */
2314d9a789c7SRafael J. Wysocki 	pr_debug("starting governor %s failed\n", policy->governor->name);
23151da177e4SLinus Torvalds 	if (old_gov) {
23163a3e9e06SViresh Kumar 		policy->governor = old_gov;
2317d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2318d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_START);
23191da177e4SLinus Torvalds 	}
23201da177e4SLinus Torvalds 
2321d9a789c7SRafael J. Wysocki 	return -EINVAL;
2322d9a789c7SRafael J. Wysocki 
2323d9a789c7SRafael J. Wysocki  out:
2324d9a789c7SRafael J. Wysocki 	pr_debug("governor: change or update limits\n");
2325d9a789c7SRafael J. Wysocki 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
23261da177e4SLinus Torvalds }
23271da177e4SLinus Torvalds 
23281da177e4SLinus Torvalds /**
23291da177e4SLinus Torvalds  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
23301da177e4SLinus Torvalds  *	@cpu: CPU which shall be re-evaluated
23311da177e4SLinus Torvalds  *
233225985edcSLucas De Marchi  *	Useful for policy notifiers which have different necessities
23331da177e4SLinus Torvalds  *	at different times.
23341da177e4SLinus Torvalds  */
23351da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu)
23361da177e4SLinus Torvalds {
23373a3e9e06SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
23383a3e9e06SViresh Kumar 	struct cpufreq_policy new_policy;
2339f1829e4aSJulia Lawall 	int ret;
23401da177e4SLinus Torvalds 
2341fefa8ff8SAaron Plattner 	if (!policy)
2342fefa8ff8SAaron Plattner 		return -ENODEV;
23431da177e4SLinus Torvalds 
2344ad7722daSviresh kumar 	down_write(&policy->rwsem);
23451da177e4SLinus Torvalds 
23462d06d8c4SDominik Brodowski 	pr_debug("updating policy for CPU %u\n", cpu);
2347d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
23483a3e9e06SViresh Kumar 	new_policy.min = policy->user_policy.min;
23493a3e9e06SViresh Kumar 	new_policy.max = policy->user_policy.max;
23503a3e9e06SViresh Kumar 	new_policy.policy = policy->user_policy.policy;
23513a3e9e06SViresh Kumar 	new_policy.governor = policy->user_policy.governor;
23521da177e4SLinus Torvalds 
2353bb176f7dSViresh Kumar 	/*
2354bb176f7dSViresh Kumar 	 * BIOS might change freq behind our back
2355bb176f7dSViresh Kumar 	 * -> ask driver for current freq and notify governors about a change
2356bb176f7dSViresh Kumar 	 */
23572ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
23583a3e9e06SViresh Kumar 		new_policy.cur = cpufreq_driver->get(cpu);
2359bd0fa9bbSViresh Kumar 		if (WARN_ON(!new_policy.cur)) {
2360bd0fa9bbSViresh Kumar 			ret = -EIO;
2361fefa8ff8SAaron Plattner 			goto unlock;
2362bd0fa9bbSViresh Kumar 		}
2363bd0fa9bbSViresh Kumar 
23643a3e9e06SViresh Kumar 		if (!policy->cur) {
2365e837f9b5SJoe Perches 			pr_debug("Driver did not initialize current freq\n");
23663a3e9e06SViresh Kumar 			policy->cur = new_policy.cur;
2367a85f7bd3SThomas Renninger 		} else {
23689c0ebcf7SViresh Kumar 			if (policy->cur != new_policy.cur && has_target())
2369a1e1dc41SViresh Kumar 				cpufreq_out_of_sync(policy, new_policy.cur);
23700961dd0dSThomas Renninger 		}
2371a85f7bd3SThomas Renninger 	}
23720961dd0dSThomas Renninger 
2373037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
23741da177e4SLinus Torvalds 
2375fefa8ff8SAaron Plattner unlock:
2376ad7722daSviresh kumar 	up_write(&policy->rwsem);
23775a01f2e8SVenkatesh Pallipadi 
23783a3e9e06SViresh Kumar 	cpufreq_cpu_put(policy);
23791da177e4SLinus Torvalds 	return ret;
23801da177e4SLinus Torvalds }
23811da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy);
23821da177e4SLinus Torvalds 
23832760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb,
2384c32b6b8eSAshok Raj 					unsigned long action, void *hcpu)
2385c32b6b8eSAshok Raj {
2386c32b6b8eSAshok Raj 	unsigned int cpu = (unsigned long)hcpu;
23878a25a2fdSKay Sievers 	struct device *dev;
2388c32b6b8eSAshok Raj 
23898a25a2fdSKay Sievers 	dev = get_cpu_device(cpu);
23908a25a2fdSKay Sievers 	if (dev) {
23915302c3fbSSrivatsa S. Bhat 		switch (action & ~CPU_TASKS_FROZEN) {
2392c32b6b8eSAshok Raj 		case CPU_ONLINE:
239323faf0b7SViresh Kumar 			cpufreq_add_dev(dev, NULL);
2394c32b6b8eSAshok Raj 			break;
23955302c3fbSSrivatsa S. Bhat 
2396c32b6b8eSAshok Raj 		case CPU_DOWN_PREPARE:
239796bbbe4aSViresh Kumar 			__cpufreq_remove_dev_prepare(dev, NULL);
23981aee40acSSrivatsa S. Bhat 			break;
23991aee40acSSrivatsa S. Bhat 
24001aee40acSSrivatsa S. Bhat 		case CPU_POST_DEAD:
240196bbbe4aSViresh Kumar 			__cpufreq_remove_dev_finish(dev, NULL);
2402c32b6b8eSAshok Raj 			break;
24035302c3fbSSrivatsa S. Bhat 
24045a01f2e8SVenkatesh Pallipadi 		case CPU_DOWN_FAILED:
240523faf0b7SViresh Kumar 			cpufreq_add_dev(dev, NULL);
2406c32b6b8eSAshok Raj 			break;
2407c32b6b8eSAshok Raj 		}
2408c32b6b8eSAshok Raj 	}
2409c32b6b8eSAshok Raj 	return NOTIFY_OK;
2410c32b6b8eSAshok Raj }
2411c32b6b8eSAshok Raj 
24129c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = {
2413c32b6b8eSAshok Raj 	.notifier_call = cpufreq_cpu_callback,
2414c32b6b8eSAshok Raj };
24151da177e4SLinus Torvalds 
24161da177e4SLinus Torvalds /*********************************************************************
24176f19efc0SLukasz Majewski  *               BOOST						     *
24186f19efc0SLukasz Majewski  *********************************************************************/
24196f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state)
24206f19efc0SLukasz Majewski {
24216f19efc0SLukasz Majewski 	struct cpufreq_frequency_table *freq_table;
24226f19efc0SLukasz Majewski 	struct cpufreq_policy *policy;
24236f19efc0SLukasz Majewski 	int ret = -EINVAL;
24246f19efc0SLukasz Majewski 
2425f963735aSViresh Kumar 	for_each_active_policy(policy) {
24266f19efc0SLukasz Majewski 		freq_table = cpufreq_frequency_get_table(policy->cpu);
24276f19efc0SLukasz Majewski 		if (freq_table) {
24286f19efc0SLukasz Majewski 			ret = cpufreq_frequency_table_cpuinfo(policy,
24296f19efc0SLukasz Majewski 							freq_table);
24306f19efc0SLukasz Majewski 			if (ret) {
24316f19efc0SLukasz Majewski 				pr_err("%s: Policy frequency update failed\n",
24326f19efc0SLukasz Majewski 				       __func__);
24336f19efc0SLukasz Majewski 				break;
24346f19efc0SLukasz Majewski 			}
24356f19efc0SLukasz Majewski 			policy->user_policy.max = policy->max;
24366f19efc0SLukasz Majewski 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
24376f19efc0SLukasz Majewski 		}
24386f19efc0SLukasz Majewski 	}
24396f19efc0SLukasz Majewski 
24406f19efc0SLukasz Majewski 	return ret;
24416f19efc0SLukasz Majewski }
24426f19efc0SLukasz Majewski 
24436f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state)
24446f19efc0SLukasz Majewski {
24456f19efc0SLukasz Majewski 	unsigned long flags;
24466f19efc0SLukasz Majewski 	int ret = 0;
24476f19efc0SLukasz Majewski 
24486f19efc0SLukasz Majewski 	if (cpufreq_driver->boost_enabled == state)
24496f19efc0SLukasz Majewski 		return 0;
24506f19efc0SLukasz Majewski 
24516f19efc0SLukasz Majewski 	write_lock_irqsave(&cpufreq_driver_lock, flags);
24526f19efc0SLukasz Majewski 	cpufreq_driver->boost_enabled = state;
24536f19efc0SLukasz Majewski 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24546f19efc0SLukasz Majewski 
24556f19efc0SLukasz Majewski 	ret = cpufreq_driver->set_boost(state);
24566f19efc0SLukasz Majewski 	if (ret) {
24576f19efc0SLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
24586f19efc0SLukasz Majewski 		cpufreq_driver->boost_enabled = !state;
24596f19efc0SLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24606f19efc0SLukasz Majewski 
2461e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST\n",
2462e837f9b5SJoe Perches 		       __func__, state ? "enable" : "disable");
24636f19efc0SLukasz Majewski 	}
24646f19efc0SLukasz Majewski 
24656f19efc0SLukasz Majewski 	return ret;
24666f19efc0SLukasz Majewski }
24676f19efc0SLukasz Majewski 
24686f19efc0SLukasz Majewski int cpufreq_boost_supported(void)
24696f19efc0SLukasz Majewski {
24706f19efc0SLukasz Majewski 	if (likely(cpufreq_driver))
24716f19efc0SLukasz Majewski 		return cpufreq_driver->boost_supported;
24726f19efc0SLukasz Majewski 
24736f19efc0SLukasz Majewski 	return 0;
24746f19efc0SLukasz Majewski }
24756f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
24766f19efc0SLukasz Majewski 
24776f19efc0SLukasz Majewski int cpufreq_boost_enabled(void)
24786f19efc0SLukasz Majewski {
24796f19efc0SLukasz Majewski 	return cpufreq_driver->boost_enabled;
24806f19efc0SLukasz Majewski }
24816f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
24826f19efc0SLukasz Majewski 
24836f19efc0SLukasz Majewski /*********************************************************************
24841da177e4SLinus Torvalds  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
24851da177e4SLinus Torvalds  *********************************************************************/
24861da177e4SLinus Torvalds 
24871da177e4SLinus Torvalds /**
24881da177e4SLinus Torvalds  * cpufreq_register_driver - register a CPU Frequency driver
24891da177e4SLinus Torvalds  * @driver_data: A struct cpufreq_driver containing the values#
24901da177e4SLinus Torvalds  * submitted by the CPU Frequency driver.
24911da177e4SLinus Torvalds  *
24921da177e4SLinus Torvalds  * Registers a CPU Frequency driver to this core code. This code
24931da177e4SLinus Torvalds  * returns zero on success, -EBUSY when another driver got here first
24941da177e4SLinus Torvalds  * (and isn't unregistered in the meantime).
24951da177e4SLinus Torvalds  *
24961da177e4SLinus Torvalds  */
2497221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data)
24981da177e4SLinus Torvalds {
24991da177e4SLinus Torvalds 	unsigned long flags;
25001da177e4SLinus Torvalds 	int ret;
25011da177e4SLinus Torvalds 
2502a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2503a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2504a7b422cdSKonrad Rzeszutek Wilk 
25051da177e4SLinus Torvalds 	if (!driver_data || !driver_data->verify || !driver_data->init ||
25069c0ebcf7SViresh Kumar 	    !(driver_data->setpolicy || driver_data->target_index ||
25079832235fSRafael J. Wysocki 		    driver_data->target) ||
25089832235fSRafael J. Wysocki 	     (driver_data->setpolicy && (driver_data->target_index ||
25091c03a2d0SViresh Kumar 		    driver_data->target)) ||
25101c03a2d0SViresh Kumar 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
25111da177e4SLinus Torvalds 		return -EINVAL;
25121da177e4SLinus Torvalds 
25132d06d8c4SDominik Brodowski 	pr_debug("trying to register driver %s\n", driver_data->name);
25141da177e4SLinus Torvalds 
25150d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25161c3d85ddSRafael J. Wysocki 	if (cpufreq_driver) {
25170d1857a1SNathan Zimmer 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25184dea5806SYinghai Lu 		return -EEXIST;
25191da177e4SLinus Torvalds 	}
25201c3d85ddSRafael J. Wysocki 	cpufreq_driver = driver_data;
25210d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25221da177e4SLinus Torvalds 
2523bc68b7dfSViresh Kumar 	if (driver_data->setpolicy)
2524bc68b7dfSViresh Kumar 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2525bc68b7dfSViresh Kumar 
25266f19efc0SLukasz Majewski 	if (cpufreq_boost_supported()) {
25276f19efc0SLukasz Majewski 		/*
25286f19efc0SLukasz Majewski 		 * Check if driver provides function to enable boost -
25296f19efc0SLukasz Majewski 		 * if not, use cpufreq_boost_set_sw as default
25306f19efc0SLukasz Majewski 		 */
25316f19efc0SLukasz Majewski 		if (!cpufreq_driver->set_boost)
25326f19efc0SLukasz Majewski 			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
25336f19efc0SLukasz Majewski 
25346f19efc0SLukasz Majewski 		ret = cpufreq_sysfs_create_file(&boost.attr);
25356f19efc0SLukasz Majewski 		if (ret) {
25366f19efc0SLukasz Majewski 			pr_err("%s: cannot register global BOOST sysfs file\n",
25376f19efc0SLukasz Majewski 			       __func__);
25386f19efc0SLukasz Majewski 			goto err_null_driver;
25396f19efc0SLukasz Majewski 		}
25406f19efc0SLukasz Majewski 	}
25416f19efc0SLukasz Majewski 
25428a25a2fdSKay Sievers 	ret = subsys_interface_register(&cpufreq_interface);
25438f5bc2abSJiri Slaby 	if (ret)
25446f19efc0SLukasz Majewski 		goto err_boost_unreg;
25451da177e4SLinus Torvalds 
2546ce1bcfe9SViresh Kumar 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2547ce1bcfe9SViresh Kumar 	    list_empty(&cpufreq_policy_list)) {
25481da177e4SLinus Torvalds 		/* if all ->init() calls failed, unregister */
2549ce1bcfe9SViresh Kumar 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2550e08f5f5bSGautham R Shenoy 			 driver_data->name);
25518a25a2fdSKay Sievers 		goto err_if_unreg;
25521da177e4SLinus Torvalds 	}
25531da177e4SLinus Torvalds 
255465edc68cSChandra Seetharaman 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
25552d06d8c4SDominik Brodowski 	pr_debug("driver %s up and running\n", driver_data->name);
25561da177e4SLinus Torvalds 
25578f5bc2abSJiri Slaby 	return 0;
25588a25a2fdSKay Sievers err_if_unreg:
25598a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25606f19efc0SLukasz Majewski err_boost_unreg:
25616f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25626f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25638f5bc2abSJiri Slaby err_null_driver:
25640d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25651c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25660d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25674d34a67dSDave Jones 	return ret;
25681da177e4SLinus Torvalds }
25691da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver);
25701da177e4SLinus Torvalds 
25711da177e4SLinus Torvalds /**
25721da177e4SLinus Torvalds  * cpufreq_unregister_driver - unregister the current CPUFreq driver
25731da177e4SLinus Torvalds  *
25741da177e4SLinus Torvalds  * Unregister the current CPUFreq driver. Only call this if you have
25751da177e4SLinus Torvalds  * the right to do so, i.e. if you have succeeded in initialising before!
25761da177e4SLinus Torvalds  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
25771da177e4SLinus Torvalds  * currently not initialised.
25781da177e4SLinus Torvalds  */
2579221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver)
25801da177e4SLinus Torvalds {
25811da177e4SLinus Torvalds 	unsigned long flags;
25821da177e4SLinus Torvalds 
25831c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver || (driver != cpufreq_driver))
25841da177e4SLinus Torvalds 		return -EINVAL;
25851da177e4SLinus Torvalds 
25862d06d8c4SDominik Brodowski 	pr_debug("unregistering driver %s\n", driver->name);
25871da177e4SLinus Torvalds 
25888a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25896f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25906f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25916f19efc0SLukasz Majewski 
259265edc68cSChandra Seetharaman 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
25931da177e4SLinus Torvalds 
25946eed9404SViresh Kumar 	down_write(&cpufreq_rwsem);
25950d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25966eed9404SViresh Kumar 
25971c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25986eed9404SViresh Kumar 
25990d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
26006eed9404SViresh Kumar 	up_write(&cpufreq_rwsem);
26011da177e4SLinus Torvalds 
26021da177e4SLinus Torvalds 	return 0;
26031da177e4SLinus Torvalds }
26041da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
26055a01f2e8SVenkatesh Pallipadi 
260690de2a4aSDoug Anderson /*
260790de2a4aSDoug Anderson  * Stop cpufreq at shutdown to make sure it isn't holding any locks
260890de2a4aSDoug Anderson  * or mutexes when secondary CPUs are halted.
260990de2a4aSDoug Anderson  */
261090de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = {
261190de2a4aSDoug Anderson 	.shutdown = cpufreq_suspend,
261290de2a4aSDoug Anderson };
261390de2a4aSDoug Anderson 
26145a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void)
26155a01f2e8SVenkatesh Pallipadi {
2616a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2617a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2618a7b422cdSKonrad Rzeszutek Wilk 
26192361be23SViresh Kumar 	cpufreq_global_kobject = kobject_create();
26208aa84ad8SThomas Renninger 	BUG_ON(!cpufreq_global_kobject);
26218aa84ad8SThomas Renninger 
262290de2a4aSDoug Anderson 	register_syscore_ops(&cpufreq_syscore_ops);
262390de2a4aSDoug Anderson 
26245a01f2e8SVenkatesh Pallipadi 	return 0;
26255a01f2e8SVenkatesh Pallipadi }
26265a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init);
2627