xref: /openbmc/linux/drivers/cpufreq/cpufreq.c (revision 0b275352872b2641ed5c94d0f0f8c7e907bf3e3f)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/drivers/cpufreq/cpufreq.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 2001 Russell King
51da177e4SLinus Torvalds  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6bb176f7dSViresh Kumar  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
71da177e4SLinus Torvalds  *
8c32b6b8eSAshok Raj  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9c32b6b8eSAshok Raj  *	Added handling for CPU hotplug
108ff69732SDave Jones  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
118ff69732SDave Jones  *	Fix handling for CPU hotplug -- affected CPUs
12c32b6b8eSAshok Raj  *
131da177e4SLinus Torvalds  * This program is free software; you can redistribute it and/or modify
141da177e4SLinus Torvalds  * it under the terms of the GNU General Public License version 2 as
151da177e4SLinus Torvalds  * published by the Free Software Foundation.
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19db701151SViresh Kumar 
205ff0a268SViresh Kumar #include <linux/cpu.h>
211da177e4SLinus Torvalds #include <linux/cpufreq.h>
221da177e4SLinus Torvalds #include <linux/delay.h>
231da177e4SLinus Torvalds #include <linux/device.h>
245ff0a268SViresh Kumar #include <linux/init.h>
255ff0a268SViresh Kumar #include <linux/kernel_stat.h>
265ff0a268SViresh Kumar #include <linux/module.h>
273fc54d37Sakpm@osdl.org #include <linux/mutex.h>
285ff0a268SViresh Kumar #include <linux/slab.h>
292f0aea93SViresh Kumar #include <linux/suspend.h>
3090de2a4aSDoug Anderson #include <linux/syscore_ops.h>
315ff0a268SViresh Kumar #include <linux/tick.h>
326f4f2723SThomas Renninger #include <trace/events/power.h>
336f4f2723SThomas Renninger 
34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list);
35f963735aSViresh Kumar 
36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37f963735aSViresh Kumar {
38f963735aSViresh Kumar 	return cpumask_empty(policy->cpus);
39f963735aSViresh Kumar }
40f963735aSViresh Kumar 
41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42f963735aSViresh Kumar {
43f963735aSViresh Kumar 	return active == !policy_is_inactive(policy);
44f963735aSViresh Kumar }
45f963735aSViresh Kumar 
46f963735aSViresh Kumar /* Finds Next Acive/Inactive policy */
47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48f963735aSViresh Kumar 					  bool active)
49f963735aSViresh Kumar {
50f963735aSViresh Kumar 	do {
51f963735aSViresh Kumar 		policy = list_next_entry(policy, policy_list);
52f963735aSViresh Kumar 
53f963735aSViresh Kumar 		/* No more policies in the list */
54f963735aSViresh Kumar 		if (&policy->policy_list == &cpufreq_policy_list)
55f963735aSViresh Kumar 			return NULL;
56f963735aSViresh Kumar 	} while (!suitable_policy(policy, active));
57f963735aSViresh Kumar 
58f963735aSViresh Kumar 	return policy;
59f963735aSViresh Kumar }
60f963735aSViresh Kumar 
61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active)
62f963735aSViresh Kumar {
63f963735aSViresh Kumar 	struct cpufreq_policy *policy;
64f963735aSViresh Kumar 
65f963735aSViresh Kumar 	/* No policies in the list */
66f963735aSViresh Kumar 	if (list_empty(&cpufreq_policy_list))
67f963735aSViresh Kumar 		return NULL;
68f963735aSViresh Kumar 
69f963735aSViresh Kumar 	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70f963735aSViresh Kumar 				  policy_list);
71f963735aSViresh Kumar 
72f963735aSViresh Kumar 	if (!suitable_policy(policy, active))
73f963735aSViresh Kumar 		policy = next_policy(policy, active);
74f963735aSViresh Kumar 
75f963735aSViresh Kumar 	return policy;
76f963735aSViresh Kumar }
77f963735aSViresh Kumar 
78f963735aSViresh Kumar /* Macros to iterate over CPU policies */
79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active)	\
80f963735aSViresh Kumar 	for (__policy = first_policy(__active);		\
81f963735aSViresh Kumar 	     __policy;					\
82f963735aSViresh Kumar 	     __policy = next_policy(__policy, __active))
83f963735aSViresh Kumar 
84f963735aSViresh Kumar #define for_each_active_policy(__policy)		\
85f963735aSViresh Kumar 	for_each_suitable_policy(__policy, true)
86f963735aSViresh Kumar #define for_each_inactive_policy(__policy)		\
87f963735aSViresh Kumar 	for_each_suitable_policy(__policy, false)
88f963735aSViresh Kumar 
89b4f0676fSViresh Kumar #define for_each_policy(__policy)			\
90b4f0676fSViresh Kumar 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91b4f0676fSViresh Kumar 
92f7b27061SViresh Kumar /* Iterate over governors */
93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list);
94f7b27061SViresh Kumar #define for_each_governor(__governor)				\
95f7b27061SViresh Kumar 	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96f7b27061SViresh Kumar 
971da177e4SLinus Torvalds /**
98cd878479SDave Jones  * The "cpufreq driver" - the arch- or hardware-dependent low
991da177e4SLinus Torvalds  * level driver of CPUFreq support, and its spinlock. This lock
1001da177e4SLinus Torvalds  * also protects the cpufreq_cpu_data array.
1011da177e4SLinus Torvalds  */
1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver;
1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock);
1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock);
106bb176f7dSViresh Kumar 
1072f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */
1082f0aea93SViresh Kumar static bool cpufreq_suspended;
1091da177e4SLinus Torvalds 
1109c0ebcf7SViresh Kumar static inline bool has_target(void)
1119c0ebcf7SViresh Kumar {
1129c0ebcf7SViresh Kumar 	return cpufreq_driver->target_index || cpufreq_driver->target;
1139c0ebcf7SViresh Kumar }
1149c0ebcf7SViresh Kumar 
1151da177e4SLinus Torvalds /* internal prototypes */
11629464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy,
11729464f28SDave Jones 		unsigned int event);
118d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
11965f27f38SDavid Howells static void handle_update(struct work_struct *work);
1201da177e4SLinus Torvalds 
1211da177e4SLinus Torvalds /**
1221da177e4SLinus Torvalds  * Two notifier lists: the "policy" list is involved in the
1231da177e4SLinus Torvalds  * validation process for a new CPU frequency policy; the
1241da177e4SLinus Torvalds  * "transition" list for kernel code that needs to handle
1251da177e4SLinus Torvalds  * changes to devices when the CPU clock speed changes.
1261da177e4SLinus Torvalds  * The mutex locks both lists.
1271da177e4SLinus Torvalds  */
128e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
129b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list;
1301da177e4SLinus Torvalds 
13174212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called;
132b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void)
133b4dfdbb3SAlan Stern {
134b4dfdbb3SAlan Stern 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
13574212ca4SCesar Eduardo Barros 	init_cpufreq_transition_notifier_list_called = true;
136b4dfdbb3SAlan Stern 	return 0;
137b4dfdbb3SAlan Stern }
138b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list);
1391da177e4SLinus Torvalds 
140a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly;
141da584455SViresh Kumar static int cpufreq_disabled(void)
142a7b422cdSKonrad Rzeszutek Wilk {
143a7b422cdSKonrad Rzeszutek Wilk 	return off;
144a7b422cdSKonrad Rzeszutek Wilk }
145a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void)
146a7b422cdSKonrad Rzeszutek Wilk {
147a7b422cdSKonrad Rzeszutek Wilk 	off = 1;
148a7b422cdSKonrad Rzeszutek Wilk }
1493fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex);
1501da177e4SLinus Torvalds 
1514d5dcc42SViresh Kumar bool have_governor_per_policy(void)
1524d5dcc42SViresh Kumar {
1530b981e70SViresh Kumar 	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
1544d5dcc42SViresh Kumar }
1553f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy);
1564d5dcc42SViresh Kumar 
157944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
158944e9a03SViresh Kumar {
159944e9a03SViresh Kumar 	if (have_governor_per_policy())
160944e9a03SViresh Kumar 		return &policy->kobj;
161944e9a03SViresh Kumar 	else
162944e9a03SViresh Kumar 		return cpufreq_global_kobject;
163944e9a03SViresh Kumar }
164944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
165944e9a03SViresh Kumar 
1665a31d594SViresh Kumar struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
1675a31d594SViresh Kumar {
1685a31d594SViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1695a31d594SViresh Kumar 
1705a31d594SViresh Kumar 	return policy && !policy_is_inactive(policy) ?
1715a31d594SViresh Kumar 		policy->freq_table : NULL;
1725a31d594SViresh Kumar }
1735a31d594SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
1745a31d594SViresh Kumar 
17572a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
17672a4ce34SViresh Kumar {
17772a4ce34SViresh Kumar 	u64 idle_time;
17872a4ce34SViresh Kumar 	u64 cur_wall_time;
17972a4ce34SViresh Kumar 	u64 busy_time;
18072a4ce34SViresh Kumar 
18172a4ce34SViresh Kumar 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
18272a4ce34SViresh Kumar 
18372a4ce34SViresh Kumar 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
18472a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
18572a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
18672a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
18772a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
18872a4ce34SViresh Kumar 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
18972a4ce34SViresh Kumar 
19072a4ce34SViresh Kumar 	idle_time = cur_wall_time - busy_time;
19172a4ce34SViresh Kumar 	if (wall)
19272a4ce34SViresh Kumar 		*wall = cputime_to_usecs(cur_wall_time);
19372a4ce34SViresh Kumar 
19472a4ce34SViresh Kumar 	return cputime_to_usecs(idle_time);
19572a4ce34SViresh Kumar }
19672a4ce34SViresh Kumar 
19772a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
19872a4ce34SViresh Kumar {
19972a4ce34SViresh Kumar 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
20072a4ce34SViresh Kumar 
20172a4ce34SViresh Kumar 	if (idle_time == -1ULL)
20272a4ce34SViresh Kumar 		return get_cpu_idle_time_jiffy(cpu, wall);
20372a4ce34SViresh Kumar 	else if (!io_busy)
20472a4ce34SViresh Kumar 		idle_time += get_cpu_iowait_time_us(cpu, wall);
20572a4ce34SViresh Kumar 
20672a4ce34SViresh Kumar 	return idle_time;
20772a4ce34SViresh Kumar }
20872a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time);
20972a4ce34SViresh Kumar 
21070e9e778SViresh Kumar /*
21170e9e778SViresh Kumar  * This is a generic cpufreq init() routine which can be used by cpufreq
21270e9e778SViresh Kumar  * drivers of SMP systems. It will do following:
21370e9e778SViresh Kumar  * - validate & show freq table passed
21470e9e778SViresh Kumar  * - set policies transition latency
21570e9e778SViresh Kumar  * - policy->cpus with all possible CPUs
21670e9e778SViresh Kumar  */
21770e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy,
21870e9e778SViresh Kumar 		struct cpufreq_frequency_table *table,
21970e9e778SViresh Kumar 		unsigned int transition_latency)
22070e9e778SViresh Kumar {
22170e9e778SViresh Kumar 	int ret;
22270e9e778SViresh Kumar 
22370e9e778SViresh Kumar 	ret = cpufreq_table_validate_and_show(policy, table);
22470e9e778SViresh Kumar 	if (ret) {
22570e9e778SViresh Kumar 		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
22670e9e778SViresh Kumar 		return ret;
22770e9e778SViresh Kumar 	}
22870e9e778SViresh Kumar 
22970e9e778SViresh Kumar 	policy->cpuinfo.transition_latency = transition_latency;
23070e9e778SViresh Kumar 
23170e9e778SViresh Kumar 	/*
23258405af6SShailendra Verma 	 * The driver only supports the SMP configuration where all processors
23370e9e778SViresh Kumar 	 * share the clock and voltage and clock.
23470e9e778SViresh Kumar 	 */
23570e9e778SViresh Kumar 	cpumask_setall(policy->cpus);
23670e9e778SViresh Kumar 
23770e9e778SViresh Kumar 	return 0;
23870e9e778SViresh Kumar }
23970e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init);
24070e9e778SViresh Kumar 
241988bed09SViresh Kumar /* Only for cpufreq core internal use */
242988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
243652ed95dSViresh Kumar {
244652ed95dSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245652ed95dSViresh Kumar 
246988bed09SViresh Kumar 	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
247988bed09SViresh Kumar }
248988bed09SViresh Kumar 
249988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu)
250988bed09SViresh Kumar {
251988bed09SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
252988bed09SViresh Kumar 
253652ed95dSViresh Kumar 	if (!policy || IS_ERR(policy->clk)) {
254e837f9b5SJoe Perches 		pr_err("%s: No %s associated to cpu: %d\n",
255e837f9b5SJoe Perches 		       __func__, policy ? "clk" : "policy", cpu);
256652ed95dSViresh Kumar 		return 0;
257652ed95dSViresh Kumar 	}
258652ed95dSViresh Kumar 
259652ed95dSViresh Kumar 	return clk_get_rate(policy->clk) / 1000;
260652ed95dSViresh Kumar }
261652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get);
262652ed95dSViresh Kumar 
26350e9c852SViresh Kumar /**
26450e9c852SViresh Kumar  * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
26550e9c852SViresh Kumar  *
26650e9c852SViresh Kumar  * @cpu: cpu to find policy for.
26750e9c852SViresh Kumar  *
26850e9c852SViresh Kumar  * This returns policy for 'cpu', returns NULL if it doesn't exist.
26950e9c852SViresh Kumar  * It also increments the kobject reference count to mark it busy and so would
27050e9c852SViresh Kumar  * require a corresponding call to cpufreq_cpu_put() to decrement it back.
27150e9c852SViresh Kumar  * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
27250e9c852SViresh Kumar  * freed as that depends on the kobj count.
27350e9c852SViresh Kumar  *
27450e9c852SViresh Kumar  * Return: A valid policy on success, otherwise NULL on failure.
27550e9c852SViresh Kumar  */
2766eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
2771da177e4SLinus Torvalds {
2786eed9404SViresh Kumar 	struct cpufreq_policy *policy = NULL;
2791da177e4SLinus Torvalds 	unsigned long flags;
2801da177e4SLinus Torvalds 
2811b947c90SViresh Kumar 	if (WARN_ON(cpu >= nr_cpu_ids))
2826eed9404SViresh Kumar 		return NULL;
2836eed9404SViresh Kumar 
2841da177e4SLinus Torvalds 	/* get the cpufreq driver */
2850d1857a1SNathan Zimmer 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2861da177e4SLinus Torvalds 
2876eed9404SViresh Kumar 	if (cpufreq_driver) {
2881da177e4SLinus Torvalds 		/* get the CPU */
289988bed09SViresh Kumar 		policy = cpufreq_cpu_get_raw(cpu);
2906eed9404SViresh Kumar 		if (policy)
2916eed9404SViresh Kumar 			kobject_get(&policy->kobj);
2926eed9404SViresh Kumar 	}
2936eed9404SViresh Kumar 
2946eed9404SViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2951da177e4SLinus Torvalds 
2963a3e9e06SViresh Kumar 	return policy;
297a9144436SStephen Boyd }
2981da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
2991da177e4SLinus Torvalds 
30050e9c852SViresh Kumar /**
30150e9c852SViresh Kumar  * cpufreq_cpu_put: Decrements the usage count of a policy
30250e9c852SViresh Kumar  *
30350e9c852SViresh Kumar  * @policy: policy earlier returned by cpufreq_cpu_get().
30450e9c852SViresh Kumar  *
30550e9c852SViresh Kumar  * This decrements the kobject reference count incremented earlier by calling
30650e9c852SViresh Kumar  * cpufreq_cpu_get().
30750e9c852SViresh Kumar  */
3083a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy)
309a9144436SStephen Boyd {
3106eed9404SViresh Kumar 	kobject_put(&policy->kobj);
311a9144436SStephen Boyd }
3121da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
3131da177e4SLinus Torvalds 
3141da177e4SLinus Torvalds /*********************************************************************
3151da177e4SLinus Torvalds  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
3161da177e4SLinus Torvalds  *********************************************************************/
3171da177e4SLinus Torvalds 
3181da177e4SLinus Torvalds /**
3191da177e4SLinus Torvalds  * adjust_jiffies - adjust the system "loops_per_jiffy"
3201da177e4SLinus Torvalds  *
3211da177e4SLinus Torvalds  * This function alters the system "loops_per_jiffy" for the clock
3221da177e4SLinus Torvalds  * speed change. Note that loops_per_jiffy cannot be updated on SMP
3231da177e4SLinus Torvalds  * systems as each CPU might be scaled differently. So, use the arch
3241da177e4SLinus Torvalds  * per-CPU loops_per_jiffy value wherever possible.
3251da177e4SLinus Torvalds  */
32639c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
32739c132eeSViresh Kumar {
3281da177e4SLinus Torvalds #ifndef CONFIG_SMP
3291da177e4SLinus Torvalds 	static unsigned long l_p_j_ref;
3301da177e4SLinus Torvalds 	static unsigned int l_p_j_ref_freq;
3311da177e4SLinus Torvalds 
3321da177e4SLinus Torvalds 	if (ci->flags & CPUFREQ_CONST_LOOPS)
3331da177e4SLinus Torvalds 		return;
3341da177e4SLinus Torvalds 
3351da177e4SLinus Torvalds 	if (!l_p_j_ref_freq) {
3361da177e4SLinus Torvalds 		l_p_j_ref = loops_per_jiffy;
3371da177e4SLinus Torvalds 		l_p_j_ref_freq = ci->old;
338e837f9b5SJoe Perches 		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
339e837f9b5SJoe Perches 			 l_p_j_ref, l_p_j_ref_freq);
3401da177e4SLinus Torvalds 	}
3410b443eadSViresh Kumar 	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
342e08f5f5bSGautham R Shenoy 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
343e08f5f5bSGautham R Shenoy 								ci->new);
344e837f9b5SJoe Perches 		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
345e837f9b5SJoe Perches 			 loops_per_jiffy, ci->new);
3461da177e4SLinus Torvalds 	}
3471da177e4SLinus Torvalds #endif
34839c132eeSViresh Kumar }
3491da177e4SLinus Torvalds 
3500956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
351b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
3521da177e4SLinus Torvalds {
3531da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
3541da177e4SLinus Torvalds 
355d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
356d5aaffa9SDirk Brandewie 		return;
357d5aaffa9SDirk Brandewie 
3581c3d85ddSRafael J. Wysocki 	freqs->flags = cpufreq_driver->flags;
3592d06d8c4SDominik Brodowski 	pr_debug("notification %u of frequency transition to %u kHz\n",
360e4472cb3SDave Jones 		 state, freqs->new);
3611da177e4SLinus Torvalds 
3621da177e4SLinus Torvalds 	switch (state) {
363e4472cb3SDave Jones 
3641da177e4SLinus Torvalds 	case CPUFREQ_PRECHANGE:
365e4472cb3SDave Jones 		/* detect if the driver reported a value as "old frequency"
366e4472cb3SDave Jones 		 * which is not equal to what the cpufreq core thinks is
367e4472cb3SDave Jones 		 * "old frequency".
3681da177e4SLinus Torvalds 		 */
3691c3d85ddSRafael J. Wysocki 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
370e4472cb3SDave Jones 			if ((policy) && (policy->cpu == freqs->cpu) &&
371e4472cb3SDave Jones 			    (policy->cur) && (policy->cur != freqs->old)) {
372e837f9b5SJoe Perches 				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
373e4472cb3SDave Jones 					 freqs->old, policy->cur);
374e4472cb3SDave Jones 				freqs->old = policy->cur;
3751da177e4SLinus Torvalds 			}
3761da177e4SLinus Torvalds 		}
377b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
378e4472cb3SDave Jones 				CPUFREQ_PRECHANGE, freqs);
3791da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
3801da177e4SLinus Torvalds 		break;
381e4472cb3SDave Jones 
3821da177e4SLinus Torvalds 	case CPUFREQ_POSTCHANGE:
3831da177e4SLinus Torvalds 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
384e837f9b5SJoe Perches 		pr_debug("FREQ: %lu - CPU: %lu\n",
385e837f9b5SJoe Perches 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
38625e41933SThomas Renninger 		trace_cpu_frequency(freqs->new, freqs->cpu);
387b4dfdbb3SAlan Stern 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
388e4472cb3SDave Jones 				CPUFREQ_POSTCHANGE, freqs);
389e4472cb3SDave Jones 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
390e4472cb3SDave Jones 			policy->cur = freqs->new;
3911da177e4SLinus Torvalds 		break;
3921da177e4SLinus Torvalds 	}
3931da177e4SLinus Torvalds }
394bb176f7dSViresh Kumar 
395b43a7ffbSViresh Kumar /**
396b43a7ffbSViresh Kumar  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
397b43a7ffbSViresh Kumar  * on frequency transition.
398b43a7ffbSViresh Kumar  *
399b43a7ffbSViresh Kumar  * This function calls the transition notifiers and the "adjust_jiffies"
400b43a7ffbSViresh Kumar  * function. It is called twice on all CPU frequency changes that have
401b43a7ffbSViresh Kumar  * external effects.
402b43a7ffbSViresh Kumar  */
403236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy,
404b43a7ffbSViresh Kumar 		struct cpufreq_freqs *freqs, unsigned int state)
405b43a7ffbSViresh Kumar {
406b43a7ffbSViresh Kumar 	for_each_cpu(freqs->cpu, policy->cpus)
407b43a7ffbSViresh Kumar 		__cpufreq_notify_transition(policy, freqs, state);
408b43a7ffbSViresh Kumar }
4091da177e4SLinus Torvalds 
410f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */
411236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
412f7ba3b41SViresh Kumar 		struct cpufreq_freqs *freqs, int transition_failed)
413f7ba3b41SViresh Kumar {
414f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
415f7ba3b41SViresh Kumar 	if (!transition_failed)
416f7ba3b41SViresh Kumar 		return;
417f7ba3b41SViresh Kumar 
418f7ba3b41SViresh Kumar 	swap(freqs->old, freqs->new);
419f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
420f7ba3b41SViresh Kumar 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
421f7ba3b41SViresh Kumar }
422f7ba3b41SViresh Kumar 
42312478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
42412478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs)
42512478cf0SSrivatsa S. Bhat {
426ca654dc3SSrivatsa S. Bhat 
427ca654dc3SSrivatsa S. Bhat 	/*
428ca654dc3SSrivatsa S. Bhat 	 * Catch double invocations of _begin() which lead to self-deadlock.
429ca654dc3SSrivatsa S. Bhat 	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
430ca654dc3SSrivatsa S. Bhat 	 * doesn't invoke _begin() on their behalf, and hence the chances of
431ca654dc3SSrivatsa S. Bhat 	 * double invocations are very low. Moreover, there are scenarios
432ca654dc3SSrivatsa S. Bhat 	 * where these checks can emit false-positive warnings in these
433ca654dc3SSrivatsa S. Bhat 	 * drivers; so we avoid that by skipping them altogether.
434ca654dc3SSrivatsa S. Bhat 	 */
435ca654dc3SSrivatsa S. Bhat 	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
436ca654dc3SSrivatsa S. Bhat 				&& current == policy->transition_task);
437ca654dc3SSrivatsa S. Bhat 
43812478cf0SSrivatsa S. Bhat wait:
43912478cf0SSrivatsa S. Bhat 	wait_event(policy->transition_wait, !policy->transition_ongoing);
44012478cf0SSrivatsa S. Bhat 
44112478cf0SSrivatsa S. Bhat 	spin_lock(&policy->transition_lock);
44212478cf0SSrivatsa S. Bhat 
44312478cf0SSrivatsa S. Bhat 	if (unlikely(policy->transition_ongoing)) {
44412478cf0SSrivatsa S. Bhat 		spin_unlock(&policy->transition_lock);
44512478cf0SSrivatsa S. Bhat 		goto wait;
44612478cf0SSrivatsa S. Bhat 	}
44712478cf0SSrivatsa S. Bhat 
44812478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = true;
449ca654dc3SSrivatsa S. Bhat 	policy->transition_task = current;
45012478cf0SSrivatsa S. Bhat 
45112478cf0SSrivatsa S. Bhat 	spin_unlock(&policy->transition_lock);
45212478cf0SSrivatsa S. Bhat 
45312478cf0SSrivatsa S. Bhat 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
45412478cf0SSrivatsa S. Bhat }
45512478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
45612478cf0SSrivatsa S. Bhat 
45712478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
45812478cf0SSrivatsa S. Bhat 		struct cpufreq_freqs *freqs, int transition_failed)
45912478cf0SSrivatsa S. Bhat {
46012478cf0SSrivatsa S. Bhat 	if (unlikely(WARN_ON(!policy->transition_ongoing)))
46112478cf0SSrivatsa S. Bhat 		return;
46212478cf0SSrivatsa S. Bhat 
46312478cf0SSrivatsa S. Bhat 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
46412478cf0SSrivatsa S. Bhat 
46512478cf0SSrivatsa S. Bhat 	policy->transition_ongoing = false;
466ca654dc3SSrivatsa S. Bhat 	policy->transition_task = NULL;
46712478cf0SSrivatsa S. Bhat 
46812478cf0SSrivatsa S. Bhat 	wake_up(&policy->transition_wait);
46912478cf0SSrivatsa S. Bhat }
47012478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
47112478cf0SSrivatsa S. Bhat 
4721da177e4SLinus Torvalds 
4731da177e4SLinus Torvalds /*********************************************************************
4741da177e4SLinus Torvalds  *                          SYSFS INTERFACE                          *
4751da177e4SLinus Torvalds  *********************************************************************/
4768a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj,
4776f19efc0SLukasz Majewski 				 struct attribute *attr, char *buf)
4786f19efc0SLukasz Majewski {
4796f19efc0SLukasz Majewski 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
4806f19efc0SLukasz Majewski }
4816f19efc0SLukasz Majewski 
4826f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
4836f19efc0SLukasz Majewski 				  const char *buf, size_t count)
4846f19efc0SLukasz Majewski {
4856f19efc0SLukasz Majewski 	int ret, enable;
4866f19efc0SLukasz Majewski 
4876f19efc0SLukasz Majewski 	ret = sscanf(buf, "%d", &enable);
4886f19efc0SLukasz Majewski 	if (ret != 1 || enable < 0 || enable > 1)
4896f19efc0SLukasz Majewski 		return -EINVAL;
4906f19efc0SLukasz Majewski 
4916f19efc0SLukasz Majewski 	if (cpufreq_boost_trigger_state(enable)) {
492e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST!\n",
493e837f9b5SJoe Perches 		       __func__, enable ? "enable" : "disable");
4946f19efc0SLukasz Majewski 		return -EINVAL;
4956f19efc0SLukasz Majewski 	}
4966f19efc0SLukasz Majewski 
497e837f9b5SJoe Perches 	pr_debug("%s: cpufreq BOOST %s\n",
498e837f9b5SJoe Perches 		 __func__, enable ? "enabled" : "disabled");
4996f19efc0SLukasz Majewski 
5006f19efc0SLukasz Majewski 	return count;
5016f19efc0SLukasz Majewski }
5026f19efc0SLukasz Majewski define_one_global_rw(boost);
5031da177e4SLinus Torvalds 
50442f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor)
5053bcb09a3SJeremy Fitzhardinge {
5063bcb09a3SJeremy Fitzhardinge 	struct cpufreq_governor *t;
5073bcb09a3SJeremy Fitzhardinge 
508f7b27061SViresh Kumar 	for_each_governor(t)
5097c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
5103bcb09a3SJeremy Fitzhardinge 			return t;
5113bcb09a3SJeremy Fitzhardinge 
5123bcb09a3SJeremy Fitzhardinge 	return NULL;
5133bcb09a3SJeremy Fitzhardinge }
5143bcb09a3SJeremy Fitzhardinge 
5151da177e4SLinus Torvalds /**
5161da177e4SLinus Torvalds  * cpufreq_parse_governor - parse a governor string
5171da177e4SLinus Torvalds  */
5181da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
5191da177e4SLinus Torvalds 				struct cpufreq_governor **governor)
5201da177e4SLinus Torvalds {
5213bcb09a3SJeremy Fitzhardinge 	int err = -EINVAL;
5223bcb09a3SJeremy Fitzhardinge 
5231c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver)
5243bcb09a3SJeremy Fitzhardinge 		goto out;
5253bcb09a3SJeremy Fitzhardinge 
5261c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
5277c4f4539SRasmus Villemoes 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
5281da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_PERFORMANCE;
5293bcb09a3SJeremy Fitzhardinge 			err = 0;
5307c4f4539SRasmus Villemoes 		} else if (!strncasecmp(str_governor, "powersave",
531e08f5f5bSGautham R Shenoy 						CPUFREQ_NAME_LEN)) {
5321da177e4SLinus Torvalds 			*policy = CPUFREQ_POLICY_POWERSAVE;
5333bcb09a3SJeremy Fitzhardinge 			err = 0;
5341da177e4SLinus Torvalds 		}
5352e1cc3a5SViresh Kumar 	} else {
5361da177e4SLinus Torvalds 		struct cpufreq_governor *t;
5373bcb09a3SJeremy Fitzhardinge 
5383fc54d37Sakpm@osdl.org 		mutex_lock(&cpufreq_governor_mutex);
5393bcb09a3SJeremy Fitzhardinge 
54042f91fa1SViresh Kumar 		t = find_governor(str_governor);
5413bcb09a3SJeremy Fitzhardinge 
542ea714970SJeremy Fitzhardinge 		if (t == NULL) {
543ea714970SJeremy Fitzhardinge 			int ret;
544ea714970SJeremy Fitzhardinge 
545ea714970SJeremy Fitzhardinge 			mutex_unlock(&cpufreq_governor_mutex);
5461a8e1463SKees Cook 			ret = request_module("cpufreq_%s", str_governor);
547ea714970SJeremy Fitzhardinge 			mutex_lock(&cpufreq_governor_mutex);
548ea714970SJeremy Fitzhardinge 
549ea714970SJeremy Fitzhardinge 			if (ret == 0)
55042f91fa1SViresh Kumar 				t = find_governor(str_governor);
551ea714970SJeremy Fitzhardinge 		}
552ea714970SJeremy Fitzhardinge 
5533bcb09a3SJeremy Fitzhardinge 		if (t != NULL) {
5541da177e4SLinus Torvalds 			*governor = t;
5553bcb09a3SJeremy Fitzhardinge 			err = 0;
5561da177e4SLinus Torvalds 		}
5573bcb09a3SJeremy Fitzhardinge 
5583bcb09a3SJeremy Fitzhardinge 		mutex_unlock(&cpufreq_governor_mutex);
5591da177e4SLinus Torvalds 	}
5601da177e4SLinus Torvalds out:
5613bcb09a3SJeremy Fitzhardinge 	return err;
5621da177e4SLinus Torvalds }
5631da177e4SLinus Torvalds 
5641da177e4SLinus Torvalds /**
565e08f5f5bSGautham R Shenoy  * cpufreq_per_cpu_attr_read() / show_##file_name() -
566e08f5f5bSGautham R Shenoy  * print out cpufreq information
5671da177e4SLinus Torvalds  *
5681da177e4SLinus Torvalds  * Write out information from cpufreq_driver->policy[cpu]; object must be
5691da177e4SLinus Torvalds  * "unsigned int".
5701da177e4SLinus Torvalds  */
5711da177e4SLinus Torvalds 
5721da177e4SLinus Torvalds #define show_one(file_name, object)			\
5731da177e4SLinus Torvalds static ssize_t show_##file_name				\
5741da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf)		\
5751da177e4SLinus Torvalds {							\
5761da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", policy->object);	\
5771da177e4SLinus Torvalds }
5781da177e4SLinus Torvalds 
5791da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq);
5801da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq);
581ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
5821da177e4SLinus Torvalds show_one(scaling_min_freq, min);
5831da177e4SLinus Torvalds show_one(scaling_max_freq, max);
584c034b02eSDirk Brandewie 
58509347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
586c034b02eSDirk Brandewie {
587c034b02eSDirk Brandewie 	ssize_t ret;
588c034b02eSDirk Brandewie 
589c034b02eSDirk Brandewie 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
590c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
591c034b02eSDirk Brandewie 	else
592c034b02eSDirk Brandewie 		ret = sprintf(buf, "%u\n", policy->cur);
593c034b02eSDirk Brandewie 	return ret;
594c034b02eSDirk Brandewie }
5951da177e4SLinus Torvalds 
596037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
5973a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy);
5987970e08bSThomas Renninger 
5991da177e4SLinus Torvalds /**
6001da177e4SLinus Torvalds  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
6011da177e4SLinus Torvalds  */
6021da177e4SLinus Torvalds #define store_one(file_name, object)			\
6031da177e4SLinus Torvalds static ssize_t store_##file_name					\
6041da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count)		\
6051da177e4SLinus Torvalds {									\
606619c144cSVince Hsu 	int ret, temp;							\
6071da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;				\
6081da177e4SLinus Torvalds 									\
6091da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
6101da177e4SLinus Torvalds 	if (ret)							\
6111da177e4SLinus Torvalds 		return -EINVAL;						\
6121da177e4SLinus Torvalds 									\
6131da177e4SLinus Torvalds 	ret = sscanf(buf, "%u", &new_policy.object);			\
6141da177e4SLinus Torvalds 	if (ret != 1)							\
6151da177e4SLinus Torvalds 		return -EINVAL;						\
6161da177e4SLinus Torvalds 									\
617619c144cSVince Hsu 	temp = new_policy.object;					\
618037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);		\
619619c144cSVince Hsu 	if (!ret)							\
620619c144cSVince Hsu 		policy->user_policy.object = temp;			\
6211da177e4SLinus Torvalds 									\
6221da177e4SLinus Torvalds 	return ret ? ret : count;					\
6231da177e4SLinus Torvalds }
6241da177e4SLinus Torvalds 
6251da177e4SLinus Torvalds store_one(scaling_min_freq, min);
6261da177e4SLinus Torvalds store_one(scaling_max_freq, max);
6271da177e4SLinus Torvalds 
6281da177e4SLinus Torvalds /**
6291da177e4SLinus Torvalds  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
6301da177e4SLinus Torvalds  */
631e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
632e08f5f5bSGautham R Shenoy 					char *buf)
6331da177e4SLinus Torvalds {
634d92d50a4SViresh Kumar 	unsigned int cur_freq = __cpufreq_get(policy);
6351da177e4SLinus Torvalds 	if (!cur_freq)
6361da177e4SLinus Torvalds 		return sprintf(buf, "<unknown>");
6371da177e4SLinus Torvalds 	return sprintf(buf, "%u\n", cur_freq);
6381da177e4SLinus Torvalds }
6391da177e4SLinus Torvalds 
6401da177e4SLinus Torvalds /**
6411da177e4SLinus Torvalds  * show_scaling_governor - show the current policy for the specified CPU
6421da177e4SLinus Torvalds  */
643905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
6441da177e4SLinus Torvalds {
6451da177e4SLinus Torvalds 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
6461da177e4SLinus Torvalds 		return sprintf(buf, "powersave\n");
6471da177e4SLinus Torvalds 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
6481da177e4SLinus Torvalds 		return sprintf(buf, "performance\n");
6491da177e4SLinus Torvalds 	else if (policy->governor)
6504b972f0bSviresh kumar 		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
65129464f28SDave Jones 				policy->governor->name);
6521da177e4SLinus Torvalds 	return -EINVAL;
6531da177e4SLinus Torvalds }
6541da177e4SLinus Torvalds 
6551da177e4SLinus Torvalds /**
6561da177e4SLinus Torvalds  * store_scaling_governor - store policy for the specified CPU
6571da177e4SLinus Torvalds  */
6581da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
6591da177e4SLinus Torvalds 					const char *buf, size_t count)
6601da177e4SLinus Torvalds {
6615136fa56SSrivatsa S. Bhat 	int ret;
6621da177e4SLinus Torvalds 	char	str_governor[16];
6631da177e4SLinus Torvalds 	struct cpufreq_policy new_policy;
6641da177e4SLinus Torvalds 
6651da177e4SLinus Torvalds 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
6661da177e4SLinus Torvalds 	if (ret)
6671da177e4SLinus Torvalds 		return ret;
6681da177e4SLinus Torvalds 
6691da177e4SLinus Torvalds 	ret = sscanf(buf, "%15s", str_governor);
6701da177e4SLinus Torvalds 	if (ret != 1)
6711da177e4SLinus Torvalds 		return -EINVAL;
6721da177e4SLinus Torvalds 
673e08f5f5bSGautham R Shenoy 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
674e08f5f5bSGautham R Shenoy 						&new_policy.governor))
6751da177e4SLinus Torvalds 		return -EINVAL;
6761da177e4SLinus Torvalds 
677037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
6787970e08bSThomas Renninger 
6797970e08bSThomas Renninger 	policy->user_policy.policy = policy->policy;
6807970e08bSThomas Renninger 	policy->user_policy.governor = policy->governor;
6817970e08bSThomas Renninger 
682e08f5f5bSGautham R Shenoy 	if (ret)
683e08f5f5bSGautham R Shenoy 		return ret;
684e08f5f5bSGautham R Shenoy 	else
685e08f5f5bSGautham R Shenoy 		return count;
6861da177e4SLinus Torvalds }
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds /**
6891da177e4SLinus Torvalds  * show_scaling_driver - show the cpufreq driver currently loaded
6901da177e4SLinus Torvalds  */
6911da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
6921da177e4SLinus Torvalds {
6931c3d85ddSRafael J. Wysocki 	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
6941da177e4SLinus Torvalds }
6951da177e4SLinus Torvalds 
6961da177e4SLinus Torvalds /**
6971da177e4SLinus Torvalds  * show_scaling_available_governors - show the available CPUfreq governors
6981da177e4SLinus Torvalds  */
6991da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
7001da177e4SLinus Torvalds 						char *buf)
7011da177e4SLinus Torvalds {
7021da177e4SLinus Torvalds 	ssize_t i = 0;
7031da177e4SLinus Torvalds 	struct cpufreq_governor *t;
7041da177e4SLinus Torvalds 
7059c0ebcf7SViresh Kumar 	if (!has_target()) {
7061da177e4SLinus Torvalds 		i += sprintf(buf, "performance powersave");
7071da177e4SLinus Torvalds 		goto out;
7081da177e4SLinus Torvalds 	}
7091da177e4SLinus Torvalds 
710f7b27061SViresh Kumar 	for_each_governor(t) {
71129464f28SDave Jones 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
71229464f28SDave Jones 		    - (CPUFREQ_NAME_LEN + 2)))
7131da177e4SLinus Torvalds 			goto out;
7144b972f0bSviresh kumar 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
7151da177e4SLinus Torvalds 	}
7161da177e4SLinus Torvalds out:
7171da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7181da177e4SLinus Torvalds 	return i;
7191da177e4SLinus Torvalds }
720e8628dd0SDarrick J. Wong 
721f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
7221da177e4SLinus Torvalds {
7231da177e4SLinus Torvalds 	ssize_t i = 0;
7241da177e4SLinus Torvalds 	unsigned int cpu;
7251da177e4SLinus Torvalds 
726835481d9SRusty Russell 	for_each_cpu(cpu, mask) {
7271da177e4SLinus Torvalds 		if (i)
7281da177e4SLinus Torvalds 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
7291da177e4SLinus Torvalds 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
7301da177e4SLinus Torvalds 		if (i >= (PAGE_SIZE - 5))
7311da177e4SLinus Torvalds 			break;
7321da177e4SLinus Torvalds 	}
7331da177e4SLinus Torvalds 	i += sprintf(&buf[i], "\n");
7341da177e4SLinus Torvalds 	return i;
7351da177e4SLinus Torvalds }
736f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
7371da177e4SLinus Torvalds 
738e8628dd0SDarrick J. Wong /**
739e8628dd0SDarrick J. Wong  * show_related_cpus - show the CPUs affected by each transition even if
740e8628dd0SDarrick J. Wong  * hw coordination is in use
741e8628dd0SDarrick J. Wong  */
742e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
743e8628dd0SDarrick J. Wong {
744f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->related_cpus, buf);
745e8628dd0SDarrick J. Wong }
746e8628dd0SDarrick J. Wong 
747e8628dd0SDarrick J. Wong /**
748e8628dd0SDarrick J. Wong  * show_affected_cpus - show the CPUs affected by each transition
749e8628dd0SDarrick J. Wong  */
750e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
751e8628dd0SDarrick J. Wong {
752f4fd3797SLan Tianyu 	return cpufreq_show_cpus(policy->cpus, buf);
753e8628dd0SDarrick J. Wong }
754e8628dd0SDarrick J. Wong 
7559e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
7569e76988eSVenki Pallipadi 					const char *buf, size_t count)
7579e76988eSVenki Pallipadi {
7589e76988eSVenki Pallipadi 	unsigned int freq = 0;
7599e76988eSVenki Pallipadi 	unsigned int ret;
7609e76988eSVenki Pallipadi 
761879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->store_setspeed)
7629e76988eSVenki Pallipadi 		return -EINVAL;
7639e76988eSVenki Pallipadi 
7649e76988eSVenki Pallipadi 	ret = sscanf(buf, "%u", &freq);
7659e76988eSVenki Pallipadi 	if (ret != 1)
7669e76988eSVenki Pallipadi 		return -EINVAL;
7679e76988eSVenki Pallipadi 
7689e76988eSVenki Pallipadi 	policy->governor->store_setspeed(policy, freq);
7699e76988eSVenki Pallipadi 
7709e76988eSVenki Pallipadi 	return count;
7719e76988eSVenki Pallipadi }
7729e76988eSVenki Pallipadi 
7739e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
7749e76988eSVenki Pallipadi {
775879000f9SCHIKAMA masaki 	if (!policy->governor || !policy->governor->show_setspeed)
7769e76988eSVenki Pallipadi 		return sprintf(buf, "<unsupported>\n");
7779e76988eSVenki Pallipadi 
7789e76988eSVenki Pallipadi 	return policy->governor->show_setspeed(policy, buf);
7799e76988eSVenki Pallipadi }
7801da177e4SLinus Torvalds 
781e2f74f35SThomas Renninger /**
7828bf1ac72Sviresh kumar  * show_bios_limit - show the current cpufreq HW/BIOS limitation
783e2f74f35SThomas Renninger  */
784e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
785e2f74f35SThomas Renninger {
786e2f74f35SThomas Renninger 	unsigned int limit;
787e2f74f35SThomas Renninger 	int ret;
7881c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
7891c3d85ddSRafael J. Wysocki 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
790e2f74f35SThomas Renninger 		if (!ret)
791e2f74f35SThomas Renninger 			return sprintf(buf, "%u\n", limit);
792e2f74f35SThomas Renninger 	}
793e2f74f35SThomas Renninger 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
794e2f74f35SThomas Renninger }
795e2f74f35SThomas Renninger 
7966dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
7976dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq);
7986dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq);
7996dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency);
8006dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors);
8016dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver);
8026dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq);
8036dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit);
8046dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus);
8056dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus);
8066dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq);
8076dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq);
8086dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor);
8096dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed);
8101da177e4SLinus Torvalds 
8111da177e4SLinus Torvalds static struct attribute *default_attrs[] = {
8121da177e4SLinus Torvalds 	&cpuinfo_min_freq.attr,
8131da177e4SLinus Torvalds 	&cpuinfo_max_freq.attr,
814ed129784SThomas Renninger 	&cpuinfo_transition_latency.attr,
8151da177e4SLinus Torvalds 	&scaling_min_freq.attr,
8161da177e4SLinus Torvalds 	&scaling_max_freq.attr,
8171da177e4SLinus Torvalds 	&affected_cpus.attr,
818e8628dd0SDarrick J. Wong 	&related_cpus.attr,
8191da177e4SLinus Torvalds 	&scaling_governor.attr,
8201da177e4SLinus Torvalds 	&scaling_driver.attr,
8211da177e4SLinus Torvalds 	&scaling_available_governors.attr,
8229e76988eSVenki Pallipadi 	&scaling_setspeed.attr,
8231da177e4SLinus Torvalds 	NULL
8241da177e4SLinus Torvalds };
8251da177e4SLinus Torvalds 
8261da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
8271da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr)
8281da177e4SLinus Torvalds 
8291da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
8301da177e4SLinus Torvalds {
8311da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8321da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
8331b750e3bSViresh Kumar 	ssize_t ret;
8346eed9404SViresh Kumar 
835ad7722daSviresh kumar 	down_read(&policy->rwsem);
8365a01f2e8SVenkatesh Pallipadi 
837e08f5f5bSGautham R Shenoy 	if (fattr->show)
838e08f5f5bSGautham R Shenoy 		ret = fattr->show(policy, buf);
839e08f5f5bSGautham R Shenoy 	else
840e08f5f5bSGautham R Shenoy 		ret = -EIO;
841e08f5f5bSGautham R Shenoy 
842ad7722daSviresh kumar 	up_read(&policy->rwsem);
8431b750e3bSViresh Kumar 
8441da177e4SLinus Torvalds 	return ret;
8451da177e4SLinus Torvalds }
8461da177e4SLinus Torvalds 
8471da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr,
8481da177e4SLinus Torvalds 		     const char *buf, size_t count)
8491da177e4SLinus Torvalds {
8501da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8511da177e4SLinus Torvalds 	struct freq_attr *fattr = to_attr(attr);
852a07530b4SDave Jones 	ssize_t ret = -EINVAL;
8536eed9404SViresh Kumar 
8544f750c93SSrivatsa S. Bhat 	get_online_cpus();
8554f750c93SSrivatsa S. Bhat 
8564f750c93SSrivatsa S. Bhat 	if (!cpu_online(policy->cpu))
8574f750c93SSrivatsa S. Bhat 		goto unlock;
8584f750c93SSrivatsa S. Bhat 
859ad7722daSviresh kumar 	down_write(&policy->rwsem);
8605a01f2e8SVenkatesh Pallipadi 
86111e584cfSViresh Kumar 	/* Updating inactive policies is invalid, so avoid doing that. */
86211e584cfSViresh Kumar 	if (unlikely(policy_is_inactive(policy))) {
86311e584cfSViresh Kumar 		ret = -EBUSY;
86411e584cfSViresh Kumar 		goto unlock_policy_rwsem;
86511e584cfSViresh Kumar 	}
86611e584cfSViresh Kumar 
867e08f5f5bSGautham R Shenoy 	if (fattr->store)
868e08f5f5bSGautham R Shenoy 		ret = fattr->store(policy, buf, count);
869e08f5f5bSGautham R Shenoy 	else
870e08f5f5bSGautham R Shenoy 		ret = -EIO;
871e08f5f5bSGautham R Shenoy 
87211e584cfSViresh Kumar unlock_policy_rwsem:
873ad7722daSviresh kumar 	up_write(&policy->rwsem);
8744f750c93SSrivatsa S. Bhat unlock:
8754f750c93SSrivatsa S. Bhat 	put_online_cpus();
8764f750c93SSrivatsa S. Bhat 
8771da177e4SLinus Torvalds 	return ret;
8781da177e4SLinus Torvalds }
8791da177e4SLinus Torvalds 
8801da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj)
8811da177e4SLinus Torvalds {
8821da177e4SLinus Torvalds 	struct cpufreq_policy *policy = to_policy(kobj);
8832d06d8c4SDominik Brodowski 	pr_debug("last reference is dropped\n");
8841da177e4SLinus Torvalds 	complete(&policy->kobj_unregister);
8851da177e4SLinus Torvalds }
8861da177e4SLinus Torvalds 
88752cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = {
8881da177e4SLinus Torvalds 	.show	= show,
8891da177e4SLinus Torvalds 	.store	= store,
8901da177e4SLinus Torvalds };
8911da177e4SLinus Torvalds 
8921da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = {
8931da177e4SLinus Torvalds 	.sysfs_ops	= &sysfs_ops,
8941da177e4SLinus Torvalds 	.default_attrs	= default_attrs,
8951da177e4SLinus Torvalds 	.release	= cpufreq_sysfs_release,
8961da177e4SLinus Torvalds };
8971da177e4SLinus Torvalds 
8982361be23SViresh Kumar struct kobject *cpufreq_global_kobject;
8992361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject);
9002361be23SViresh Kumar 
9012361be23SViresh Kumar static int cpufreq_global_kobject_usage;
9022361be23SViresh Kumar 
9032361be23SViresh Kumar int cpufreq_get_global_kobject(void)
9042361be23SViresh Kumar {
9052361be23SViresh Kumar 	if (!cpufreq_global_kobject_usage++)
9062361be23SViresh Kumar 		return kobject_add(cpufreq_global_kobject,
9072361be23SViresh Kumar 				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
9082361be23SViresh Kumar 
9092361be23SViresh Kumar 	return 0;
9102361be23SViresh Kumar }
9112361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject);
9122361be23SViresh Kumar 
9132361be23SViresh Kumar void cpufreq_put_global_kobject(void)
9142361be23SViresh Kumar {
9152361be23SViresh Kumar 	if (!--cpufreq_global_kobject_usage)
9162361be23SViresh Kumar 		kobject_del(cpufreq_global_kobject);
9172361be23SViresh Kumar }
9182361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject);
9192361be23SViresh Kumar 
9202361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr)
9212361be23SViresh Kumar {
9222361be23SViresh Kumar 	int ret = cpufreq_get_global_kobject();
9232361be23SViresh Kumar 
9242361be23SViresh Kumar 	if (!ret) {
9252361be23SViresh Kumar 		ret = sysfs_create_file(cpufreq_global_kobject, attr);
9262361be23SViresh Kumar 		if (ret)
9272361be23SViresh Kumar 			cpufreq_put_global_kobject();
9282361be23SViresh Kumar 	}
9292361be23SViresh Kumar 
9302361be23SViresh Kumar 	return ret;
9312361be23SViresh Kumar }
9322361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file);
9332361be23SViresh Kumar 
9342361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr)
9352361be23SViresh Kumar {
9362361be23SViresh Kumar 	sysfs_remove_file(cpufreq_global_kobject, attr);
9372361be23SViresh Kumar 	cpufreq_put_global_kobject();
9382361be23SViresh Kumar }
9392361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
9402361be23SViresh Kumar 
94187549141SViresh Kumar static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
94287549141SViresh Kumar {
94387549141SViresh Kumar 	struct device *cpu_dev;
94487549141SViresh Kumar 
94587549141SViresh Kumar 	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
94687549141SViresh Kumar 
94787549141SViresh Kumar 	if (!policy)
94887549141SViresh Kumar 		return 0;
94987549141SViresh Kumar 
95087549141SViresh Kumar 	cpu_dev = get_cpu_device(cpu);
95187549141SViresh Kumar 	if (WARN_ON(!cpu_dev))
95287549141SViresh Kumar 		return 0;
95387549141SViresh Kumar 
95487549141SViresh Kumar 	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
95587549141SViresh Kumar }
95687549141SViresh Kumar 
95787549141SViresh Kumar static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
95887549141SViresh Kumar {
95987549141SViresh Kumar 	struct device *cpu_dev;
96087549141SViresh Kumar 
96187549141SViresh Kumar 	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
96287549141SViresh Kumar 
96387549141SViresh Kumar 	cpu_dev = get_cpu_device(cpu);
96487549141SViresh Kumar 	if (WARN_ON(!cpu_dev))
96587549141SViresh Kumar 		return;
96687549141SViresh Kumar 
96787549141SViresh Kumar 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
96887549141SViresh Kumar }
96987549141SViresh Kumar 
97087549141SViresh Kumar /* Add/remove symlinks for all related CPUs */
971308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
97219d6f7ecSDave Jones {
97319d6f7ecSDave Jones 	unsigned int j;
97419d6f7ecSDave Jones 	int ret = 0;
97519d6f7ecSDave Jones 
97687549141SViresh Kumar 	/* Some related CPUs might not be present (physically hotplugged) */
977559ed407SRafael J. Wysocki 	for_each_cpu(j, policy->real_cpus) {
9789d16f207SSaravana Kannan 		if (j == policy->kobj_cpu)
97919d6f7ecSDave Jones 			continue;
98019d6f7ecSDave Jones 
98187549141SViresh Kumar 		ret = add_cpu_dev_symlink(policy, j);
98271c3461eSRafael J. Wysocki 		if (ret)
98371c3461eSRafael J. Wysocki 			break;
98419d6f7ecSDave Jones 	}
98587549141SViresh Kumar 
98619d6f7ecSDave Jones 	return ret;
98719d6f7ecSDave Jones }
98819d6f7ecSDave Jones 
98987549141SViresh Kumar static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
99087549141SViresh Kumar {
99187549141SViresh Kumar 	unsigned int j;
99287549141SViresh Kumar 
99387549141SViresh Kumar 	/* Some related CPUs might not be present (physically hotplugged) */
994559ed407SRafael J. Wysocki 	for_each_cpu(j, policy->real_cpus) {
99587549141SViresh Kumar 		if (j == policy->kobj_cpu)
99687549141SViresh Kumar 			continue;
99787549141SViresh Kumar 
99887549141SViresh Kumar 		remove_cpu_dev_symlink(policy, j);
99987549141SViresh Kumar 	}
100087549141SViresh Kumar }
100187549141SViresh Kumar 
1002d9612a49SRafael J. Wysocki static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1003909a694eSDave Jones {
1004909a694eSDave Jones 	struct freq_attr **drv_attr;
1005909a694eSDave Jones 	int ret = 0;
1006909a694eSDave Jones 
1007909a694eSDave Jones 	/* set up files for this cpu device */
10081c3d85ddSRafael J. Wysocki 	drv_attr = cpufreq_driver->attr;
1009f13f1184SViresh Kumar 	while (drv_attr && *drv_attr) {
1010909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1011909a694eSDave Jones 		if (ret)
10126d4e81edSTomeu Vizoso 			return ret;
1013909a694eSDave Jones 		drv_attr++;
1014909a694eSDave Jones 	}
10151c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->get) {
1016909a694eSDave Jones 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1017909a694eSDave Jones 		if (ret)
10186d4e81edSTomeu Vizoso 			return ret;
1019909a694eSDave Jones 	}
1020c034b02eSDirk Brandewie 
1021909a694eSDave Jones 	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1022909a694eSDave Jones 	if (ret)
10236d4e81edSTomeu Vizoso 		return ret;
1024c034b02eSDirk Brandewie 
10251c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->bios_limit) {
1026e2f74f35SThomas Renninger 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1027e2f74f35SThomas Renninger 		if (ret)
10286d4e81edSTomeu Vizoso 			return ret;
1029e2f74f35SThomas Renninger 	}
1030909a694eSDave Jones 
10316d4e81edSTomeu Vizoso 	return cpufreq_add_dev_symlink(policy);
1032e18f1682SSrivatsa S. Bhat }
1033e18f1682SSrivatsa S. Bhat 
10347f0fa40fSViresh Kumar static int cpufreq_init_policy(struct cpufreq_policy *policy)
1035e18f1682SSrivatsa S. Bhat {
10366e2c89d1Sviresh kumar 	struct cpufreq_governor *gov = NULL;
1037e18f1682SSrivatsa S. Bhat 	struct cpufreq_policy new_policy;
1038e18f1682SSrivatsa S. Bhat 
1039d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
1040a27a9ab7SJason Baron 
10416e2c89d1Sviresh kumar 	/* Update governor of new_policy to the governor used before hotplug */
10424573237bSViresh Kumar 	gov = find_governor(policy->last_governor);
10436e2c89d1Sviresh kumar 	if (gov)
10446e2c89d1Sviresh kumar 		pr_debug("Restoring governor %s for cpu %d\n",
10456e2c89d1Sviresh kumar 				policy->governor->name, policy->cpu);
10466e2c89d1Sviresh kumar 	else
10476e2c89d1Sviresh kumar 		gov = CPUFREQ_DEFAULT_GOVERNOR;
10486e2c89d1Sviresh kumar 
10496e2c89d1Sviresh kumar 	new_policy.governor = gov;
10506e2c89d1Sviresh kumar 
1051a27a9ab7SJason Baron 	/* Use the default policy if its valid. */
1052a27a9ab7SJason Baron 	if (cpufreq_driver->setpolicy)
10536e2c89d1Sviresh kumar 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1054ecf7e461SDave Jones 
1055ecf7e461SDave Jones 	/* set default policy */
10567f0fa40fSViresh Kumar 	return cpufreq_set_policy(policy, &new_policy);
1057909a694eSDave Jones }
1058909a694eSDave Jones 
1059d9612a49SRafael J. Wysocki static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1060fcf80582SViresh Kumar {
10619c0ebcf7SViresh Kumar 	int ret = 0;
1062fcf80582SViresh Kumar 
1063bb29ae15SViresh Kumar 	/* Has this CPU been taken care of already? */
1064bb29ae15SViresh Kumar 	if (cpumask_test_cpu(cpu, policy->cpus))
1065bb29ae15SViresh Kumar 		return 0;
1066bb29ae15SViresh Kumar 
10679c0ebcf7SViresh Kumar 	if (has_target()) {
10683de9bdebSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
10693de9bdebSViresh Kumar 		if (ret) {
10703de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
10713de9bdebSViresh Kumar 			return ret;
10723de9bdebSViresh Kumar 		}
10733de9bdebSViresh Kumar 	}
1074fcf80582SViresh Kumar 
1075ad7722daSviresh kumar 	down_write(&policy->rwsem);
1076fcf80582SViresh Kumar 	cpumask_set_cpu(cpu, policy->cpus);
1077ad7722daSviresh kumar 	up_write(&policy->rwsem);
10782eaa3e2dSViresh Kumar 
10799c0ebcf7SViresh Kumar 	if (has_target()) {
1080e5c87b76SStratos Karafotis 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1081e5c87b76SStratos Karafotis 		if (!ret)
1082e5c87b76SStratos Karafotis 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1083e5c87b76SStratos Karafotis 
1084e5c87b76SStratos Karafotis 		if (ret) {
10853de9bdebSViresh Kumar 			pr_err("%s: Failed to start governor\n", __func__);
10863de9bdebSViresh Kumar 			return ret;
10873de9bdebSViresh Kumar 		}
1088820c6ca2SViresh Kumar 	}
1089fcf80582SViresh Kumar 
109087549141SViresh Kumar 	return 0;
1091fcf80582SViresh Kumar }
10921da177e4SLinus Torvalds 
1093a34e63b1SRafael J. Wysocki static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1094e9698cc5SSrivatsa S. Bhat {
1095a34e63b1SRafael J. Wysocki 	struct device *dev = get_cpu_device(cpu);
1096e9698cc5SSrivatsa S. Bhat 	struct cpufreq_policy *policy;
10972fc3384dSViresh Kumar 	int ret;
1098e9698cc5SSrivatsa S. Bhat 
1099a34e63b1SRafael J. Wysocki 	if (WARN_ON(!dev))
1100a34e63b1SRafael J. Wysocki 		return NULL;
1101a34e63b1SRafael J. Wysocki 
1102e9698cc5SSrivatsa S. Bhat 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1103e9698cc5SSrivatsa S. Bhat 	if (!policy)
1104e9698cc5SSrivatsa S. Bhat 		return NULL;
1105e9698cc5SSrivatsa S. Bhat 
1106e9698cc5SSrivatsa S. Bhat 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1107e9698cc5SSrivatsa S. Bhat 		goto err_free_policy;
1108e9698cc5SSrivatsa S. Bhat 
1109e9698cc5SSrivatsa S. Bhat 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1110e9698cc5SSrivatsa S. Bhat 		goto err_free_cpumask;
1111e9698cc5SSrivatsa S. Bhat 
1112559ed407SRafael J. Wysocki 	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1113559ed407SRafael J. Wysocki 		goto err_free_rcpumask;
1114559ed407SRafael J. Wysocki 
11152fc3384dSViresh Kumar 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
11162fc3384dSViresh Kumar 				   "cpufreq");
11172fc3384dSViresh Kumar 	if (ret) {
11182fc3384dSViresh Kumar 		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1119559ed407SRafael J. Wysocki 		goto err_free_real_cpus;
11202fc3384dSViresh Kumar 	}
11212fc3384dSViresh Kumar 
1122c88a1f8bSLukasz Majewski 	INIT_LIST_HEAD(&policy->policy_list);
1123ad7722daSviresh kumar 	init_rwsem(&policy->rwsem);
112412478cf0SSrivatsa S. Bhat 	spin_lock_init(&policy->transition_lock);
112512478cf0SSrivatsa S. Bhat 	init_waitqueue_head(&policy->transition_wait);
1126818c5712SViresh Kumar 	init_completion(&policy->kobj_unregister);
1127818c5712SViresh Kumar 	INIT_WORK(&policy->update, handle_update);
1128ad7722daSviresh kumar 
1129a34e63b1SRafael J. Wysocki 	policy->cpu = cpu;
113087549141SViresh Kumar 
113187549141SViresh Kumar 	/* Set this once on allocation */
1132a34e63b1SRafael J. Wysocki 	policy->kobj_cpu = cpu;
113387549141SViresh Kumar 
1134e9698cc5SSrivatsa S. Bhat 	return policy;
1135e9698cc5SSrivatsa S. Bhat 
1136559ed407SRafael J. Wysocki err_free_real_cpus:
1137559ed407SRafael J. Wysocki 	free_cpumask_var(policy->real_cpus);
11382fc3384dSViresh Kumar err_free_rcpumask:
11392fc3384dSViresh Kumar 	free_cpumask_var(policy->related_cpus);
1140e9698cc5SSrivatsa S. Bhat err_free_cpumask:
1141e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1142e9698cc5SSrivatsa S. Bhat err_free_policy:
1143e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1144e9698cc5SSrivatsa S. Bhat 
1145e9698cc5SSrivatsa S. Bhat 	return NULL;
1146e9698cc5SSrivatsa S. Bhat }
1147e9698cc5SSrivatsa S. Bhat 
11482fc3384dSViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
114942f921a6SViresh Kumar {
115042f921a6SViresh Kumar 	struct kobject *kobj;
115142f921a6SViresh Kumar 	struct completion *cmp;
115242f921a6SViresh Kumar 
11532fc3384dSViresh Kumar 	if (notify)
1154fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1155fcd7af91SViresh Kumar 					     CPUFREQ_REMOVE_POLICY, policy);
1156fcd7af91SViresh Kumar 
115787549141SViresh Kumar 	down_write(&policy->rwsem);
115887549141SViresh Kumar 	cpufreq_remove_dev_symlink(policy);
115942f921a6SViresh Kumar 	kobj = &policy->kobj;
116042f921a6SViresh Kumar 	cmp = &policy->kobj_unregister;
116187549141SViresh Kumar 	up_write(&policy->rwsem);
116242f921a6SViresh Kumar 	kobject_put(kobj);
116342f921a6SViresh Kumar 
116442f921a6SViresh Kumar 	/*
116542f921a6SViresh Kumar 	 * We need to make sure that the underlying kobj is
116642f921a6SViresh Kumar 	 * actually not referenced anymore by anybody before we
116742f921a6SViresh Kumar 	 * proceed with unloading.
116842f921a6SViresh Kumar 	 */
116942f921a6SViresh Kumar 	pr_debug("waiting for dropping of refcount\n");
117042f921a6SViresh Kumar 	wait_for_completion(cmp);
117142f921a6SViresh Kumar 	pr_debug("wait complete\n");
117242f921a6SViresh Kumar }
117342f921a6SViresh Kumar 
11743654c5ccSViresh Kumar static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1175e9698cc5SSrivatsa S. Bhat {
1176988bed09SViresh Kumar 	unsigned long flags;
1177988bed09SViresh Kumar 	int cpu;
1178988bed09SViresh Kumar 
1179988bed09SViresh Kumar 	/* Remove policy from list */
1180988bed09SViresh Kumar 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1181988bed09SViresh Kumar 	list_del(&policy->policy_list);
1182988bed09SViresh Kumar 
1183988bed09SViresh Kumar 	for_each_cpu(cpu, policy->related_cpus)
1184988bed09SViresh Kumar 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
1185988bed09SViresh Kumar 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1186988bed09SViresh Kumar 
11873654c5ccSViresh Kumar 	cpufreq_policy_put_kobj(policy, notify);
1188559ed407SRafael J. Wysocki 	free_cpumask_var(policy->real_cpus);
1189e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->related_cpus);
1190e9698cc5SSrivatsa S. Bhat 	free_cpumask_var(policy->cpus);
1191e9698cc5SSrivatsa S. Bhat 	kfree(policy);
1192e9698cc5SSrivatsa S. Bhat }
1193e9698cc5SSrivatsa S. Bhat 
1194*0b275352SRafael J. Wysocki static int cpufreq_online(unsigned int cpu)
11951da177e4SLinus Torvalds {
11967f0c020aSViresh Kumar 	struct cpufreq_policy *policy;
119711ce707eSRafael J. Wysocki 	bool recover_policy;
1198*0b275352SRafael J. Wysocki 	unsigned long flags;
1199*0b275352SRafael J. Wysocki 	unsigned int j;
1200*0b275352SRafael J. Wysocki 	int ret;
1201c32b6b8eSAshok Raj 
1202*0b275352SRafael J. Wysocki 	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
120387549141SViresh Kumar 
1204bb29ae15SViresh Kumar 	/* Check if this CPU already has a policy to manage it */
12059104bb26SViresh Kumar 	policy = per_cpu(cpufreq_cpu_data, cpu);
120611ce707eSRafael J. Wysocki 	if (policy) {
12079104bb26SViresh Kumar 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
120811ce707eSRafael J. Wysocki 		if (!policy_is_inactive(policy))
1209d9612a49SRafael J. Wysocki 			return cpufreq_add_policy_cpu(policy, cpu);
12101da177e4SLinus Torvalds 
121111ce707eSRafael J. Wysocki 		/* This is the only online CPU for the policy.  Start over. */
121211ce707eSRafael J. Wysocki 		recover_policy = true;
121311ce707eSRafael J. Wysocki 		down_write(&policy->rwsem);
121411ce707eSRafael J. Wysocki 		policy->cpu = cpu;
121511ce707eSRafael J. Wysocki 		policy->governor = NULL;
121611ce707eSRafael J. Wysocki 		up_write(&policy->rwsem);
121711ce707eSRafael J. Wysocki 	} else {
121896bbbe4aSViresh Kumar 		recover_policy = false;
1219a34e63b1SRafael J. Wysocki 		policy = cpufreq_policy_alloc(cpu);
1220059019a3SDave Jones 		if (!policy)
1221d4d854d6SRafael J. Wysocki 			return -ENOMEM;
122272368d12SRafael J. Wysocki 	}
12230d66b91eSSrivatsa S. Bhat 
1224835481d9SRusty Russell 	cpumask_copy(policy->cpus, cpumask_of(cpu));
12251da177e4SLinus Torvalds 
12261da177e4SLinus Torvalds 	/* call driver. From then on the cpufreq must be able
12271da177e4SLinus Torvalds 	 * to accept all calls to ->verify and ->setpolicy for this CPU
12281da177e4SLinus Torvalds 	 */
12291c3d85ddSRafael J. Wysocki 	ret = cpufreq_driver->init(policy);
12301da177e4SLinus Torvalds 	if (ret) {
12312d06d8c4SDominik Brodowski 		pr_debug("initialization failed\n");
12328101f997SViresh Kumar 		goto out_free_policy;
12331da177e4SLinus Torvalds 	}
1234643ae6e8SViresh Kumar 
12356d4e81edSTomeu Vizoso 	down_write(&policy->rwsem);
12366d4e81edSTomeu Vizoso 
12374d1f3a5bSRafael J. Wysocki 	if (!recover_policy) {
12384d1f3a5bSRafael J. Wysocki 		/* related_cpus should at least include policy->cpus. */
12395a7e56a5SViresh Kumar 		cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
12404d1f3a5bSRafael J. Wysocki 		/* Remember CPUs present at the policy creation time. */
1241559ed407SRafael J. Wysocki 		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
12424d1f3a5bSRafael J. Wysocki 	}
1243559ed407SRafael J. Wysocki 
12445a7e56a5SViresh Kumar 	/*
12455a7e56a5SViresh Kumar 	 * affected cpus must always be the one, which are online. We aren't
12465a7e56a5SViresh Kumar 	 * managing offline cpus here.
12475a7e56a5SViresh Kumar 	 */
12485a7e56a5SViresh Kumar 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
12495a7e56a5SViresh Kumar 
125096bbbe4aSViresh Kumar 	if (!recover_policy) {
12515a7e56a5SViresh Kumar 		policy->user_policy.min = policy->min;
12525a7e56a5SViresh Kumar 		policy->user_policy.max = policy->max;
12536d4e81edSTomeu Vizoso 
1254652ed95dSViresh Kumar 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1255988bed09SViresh Kumar 		for_each_cpu(j, policy->related_cpus)
1256652ed95dSViresh Kumar 			per_cpu(cpufreq_cpu_data, j) = policy;
1257652ed95dSViresh Kumar 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1258988bed09SViresh Kumar 	}
1259652ed95dSViresh Kumar 
12602ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1261da60ce9fSViresh Kumar 		policy->cur = cpufreq_driver->get(policy->cpu);
1262da60ce9fSViresh Kumar 		if (!policy->cur) {
1263da60ce9fSViresh Kumar 			pr_err("%s: ->get() failed\n", __func__);
12648101f997SViresh Kumar 			goto out_exit_policy;
1265da60ce9fSViresh Kumar 		}
1266da60ce9fSViresh Kumar 	}
1267da60ce9fSViresh Kumar 
1268d3916691SViresh Kumar 	/*
1269d3916691SViresh Kumar 	 * Sometimes boot loaders set CPU frequency to a value outside of
1270d3916691SViresh Kumar 	 * frequency table present with cpufreq core. In such cases CPU might be
1271d3916691SViresh Kumar 	 * unstable if it has to run on that frequency for long duration of time
1272d3916691SViresh Kumar 	 * and so its better to set it to a frequency which is specified in
1273d3916691SViresh Kumar 	 * freq-table. This also makes cpufreq stats inconsistent as
1274d3916691SViresh Kumar 	 * cpufreq-stats would fail to register because current frequency of CPU
1275d3916691SViresh Kumar 	 * isn't found in freq-table.
1276d3916691SViresh Kumar 	 *
1277d3916691SViresh Kumar 	 * Because we don't want this change to effect boot process badly, we go
1278d3916691SViresh Kumar 	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1279d3916691SViresh Kumar 	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1280d3916691SViresh Kumar 	 * is initialized to zero).
1281d3916691SViresh Kumar 	 *
1282d3916691SViresh Kumar 	 * We are passing target-freq as "policy->cur - 1" otherwise
1283d3916691SViresh Kumar 	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1284d3916691SViresh Kumar 	 * equal to target-freq.
1285d3916691SViresh Kumar 	 */
1286d3916691SViresh Kumar 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1287d3916691SViresh Kumar 	    && has_target()) {
1288d3916691SViresh Kumar 		/* Are we running at unknown frequency ? */
1289d3916691SViresh Kumar 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1290d3916691SViresh Kumar 		if (ret == -EINVAL) {
1291d3916691SViresh Kumar 			/* Warn user and fix it */
1292d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1293d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1294d3916691SViresh Kumar 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1295d3916691SViresh Kumar 				CPUFREQ_RELATION_L);
1296d3916691SViresh Kumar 
1297d3916691SViresh Kumar 			/*
1298d3916691SViresh Kumar 			 * Reaching here after boot in a few seconds may not
1299d3916691SViresh Kumar 			 * mean that system will remain stable at "unknown"
1300d3916691SViresh Kumar 			 * frequency for longer duration. Hence, a BUG_ON().
1301d3916691SViresh Kumar 			 */
1302d3916691SViresh Kumar 			BUG_ON(ret);
1303d3916691SViresh Kumar 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1304d3916691SViresh Kumar 				__func__, policy->cpu, policy->cur);
1305d3916691SViresh Kumar 		}
1306d3916691SViresh Kumar 	}
1307d3916691SViresh Kumar 
1308a1531acdSThomas Renninger 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1309a1531acdSThomas Renninger 				     CPUFREQ_START, policy);
1310a1531acdSThomas Renninger 
131196bbbe4aSViresh Kumar 	if (!recover_policy) {
1312d9612a49SRafael J. Wysocki 		ret = cpufreq_add_dev_interface(policy);
131319d6f7ecSDave Jones 		if (ret)
13148101f997SViresh Kumar 			goto out_exit_policy;
1315fcd7af91SViresh Kumar 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1316fcd7af91SViresh Kumar 				CPUFREQ_CREATE_POLICY, policy);
1317c88a1f8bSLukasz Majewski 
1318c88a1f8bSLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1319c88a1f8bSLukasz Majewski 		list_add(&policy->policy_list, &cpufreq_policy_list);
1320c88a1f8bSLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1321988bed09SViresh Kumar 	}
13228ff69732SDave Jones 
13237f0fa40fSViresh Kumar 	ret = cpufreq_init_policy(policy);
13247f0fa40fSViresh Kumar 	if (ret) {
13257f0fa40fSViresh Kumar 		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
13267f0fa40fSViresh Kumar 		       __func__, cpu, ret);
13277f0fa40fSViresh Kumar 		goto out_remove_policy_notify;
13287f0fa40fSViresh Kumar 	}
1329e18f1682SSrivatsa S. Bhat 
133096bbbe4aSViresh Kumar 	if (!recover_policy) {
133108fd8c1cSViresh Kumar 		policy->user_policy.policy = policy->policy;
133208fd8c1cSViresh Kumar 		policy->user_policy.governor = policy->governor;
133308fd8c1cSViresh Kumar 	}
13344e97b631SViresh Kumar 	up_write(&policy->rwsem);
133508fd8c1cSViresh Kumar 
1336038c5b3eSGreg Kroah-Hartman 	kobject_uevent(&policy->kobj, KOBJ_ADD);
13377c45cf31SViresh Kumar 
13387c45cf31SViresh Kumar 	/* Callback for handling stuff after policy is ready */
13397c45cf31SViresh Kumar 	if (cpufreq_driver->ready)
13407c45cf31SViresh Kumar 		cpufreq_driver->ready(policy);
13417c45cf31SViresh Kumar 
13422d06d8c4SDominik Brodowski 	pr_debug("initialization complete\n");
13431da177e4SLinus Torvalds 
13441da177e4SLinus Torvalds 	return 0;
13451da177e4SLinus Torvalds 
13467f0fa40fSViresh Kumar out_remove_policy_notify:
13477f0fa40fSViresh Kumar 	/* cpufreq_policy_free() will notify based on this */
13487f0fa40fSViresh Kumar 	recover_policy = true;
13498101f997SViresh Kumar out_exit_policy:
13507106e02bSPrarit Bhargava 	up_write(&policy->rwsem);
13517106e02bSPrarit Bhargava 
1352da60ce9fSViresh Kumar 	if (cpufreq_driver->exit)
1353da60ce9fSViresh Kumar 		cpufreq_driver->exit(policy);
13548101f997SViresh Kumar out_free_policy:
13553654c5ccSViresh Kumar 	cpufreq_policy_free(policy, recover_policy);
13561da177e4SLinus Torvalds 	return ret;
13571da177e4SLinus Torvalds }
13581da177e4SLinus Torvalds 
1359*0b275352SRafael J. Wysocki /**
1360*0b275352SRafael J. Wysocki  * cpufreq_add_dev - the cpufreq interface for a CPU device.
1361*0b275352SRafael J. Wysocki  * @dev: CPU device.
1362*0b275352SRafael J. Wysocki  * @sif: Subsystem interface structure pointer (not used)
1363*0b275352SRafael J. Wysocki  */
1364*0b275352SRafael J. Wysocki static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1365*0b275352SRafael J. Wysocki {
1366*0b275352SRafael J. Wysocki 	unsigned cpu = dev->id;
1367*0b275352SRafael J. Wysocki 	int ret;
1368*0b275352SRafael J. Wysocki 
1369*0b275352SRafael J. Wysocki 	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1370*0b275352SRafael J. Wysocki 
1371*0b275352SRafael J. Wysocki 	if (cpu_online(cpu)) {
1372*0b275352SRafael J. Wysocki 		ret = cpufreq_online(cpu);
1373*0b275352SRafael J. Wysocki 	} else {
1374*0b275352SRafael J. Wysocki 		/*
1375*0b275352SRafael J. Wysocki 		 * A hotplug notifier will follow and we will handle it as CPU
1376*0b275352SRafael J. Wysocki 		 * online then.  For now, just create the sysfs link, unless
1377*0b275352SRafael J. Wysocki 		 * there is no policy or the link is already present.
1378*0b275352SRafael J. Wysocki 		 */
1379*0b275352SRafael J. Wysocki 		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1380*0b275352SRafael J. Wysocki 
1381*0b275352SRafael J. Wysocki 		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1382*0b275352SRafael J. Wysocki 			? add_cpu_dev_symlink(policy, cpu) : 0;
1383*0b275352SRafael J. Wysocki 	}
1384*0b275352SRafael J. Wysocki 
1385*0b275352SRafael J. Wysocki 	return ret;
1386*0b275352SRafael J. Wysocki }
1387*0b275352SRafael J. Wysocki 
138815c0b4d2SRafael J. Wysocki static void cpufreq_offline_prepare(unsigned int cpu)
13891da177e4SLinus Torvalds {
13903a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
13911da177e4SLinus Torvalds 
1392b8eed8afSViresh Kumar 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
13931da177e4SLinus Torvalds 
1394988bed09SViresh Kumar 	policy = cpufreq_cpu_get_raw(cpu);
13953a3e9e06SViresh Kumar 	if (!policy) {
1396b8eed8afSViresh Kumar 		pr_debug("%s: No cpu_data found\n", __func__);
139715c0b4d2SRafael J. Wysocki 		return;
13981da177e4SLinus Torvalds 	}
13991da177e4SLinus Torvalds 
14009c0ebcf7SViresh Kumar 	if (has_target()) {
140115c0b4d2SRafael J. Wysocki 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1402559ed407SRafael J. Wysocki 		if (ret)
14033de9bdebSViresh Kumar 			pr_err("%s: Failed to stop governor\n", __func__);
1404db5f2995SViresh Kumar 	}
14051da177e4SLinus Torvalds 
14064573237bSViresh Kumar 	down_write(&policy->rwsem);
14079591becbSViresh Kumar 	cpumask_clear_cpu(cpu, policy->cpus);
14084573237bSViresh Kumar 
14099591becbSViresh Kumar 	if (policy_is_inactive(policy)) {
14109591becbSViresh Kumar 		if (has_target())
14114573237bSViresh Kumar 			strncpy(policy->last_governor, policy->governor->name,
14124573237bSViresh Kumar 				CPUFREQ_NAME_LEN);
14139591becbSViresh Kumar 	} else if (cpu == policy->cpu) {
14149591becbSViresh Kumar 		/* Nominate new CPU */
14159591becbSViresh Kumar 		policy->cpu = cpumask_any(policy->cpus);
14169591becbSViresh Kumar 	}
14174573237bSViresh Kumar 	up_write(&policy->rwsem);
14181da177e4SLinus Torvalds 
14199591becbSViresh Kumar 	/* Start governor again for active policy */
14209591becbSViresh Kumar 	if (!policy_is_inactive(policy)) {
14219591becbSViresh Kumar 		if (has_target()) {
142215c0b4d2SRafael J. Wysocki 			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
14239591becbSViresh Kumar 			if (!ret)
14249591becbSViresh Kumar 				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
142587549141SViresh Kumar 
14269591becbSViresh Kumar 			if (ret)
14279591becbSViresh Kumar 				pr_err("%s: Failed to start governor\n", __func__);
14289591becbSViresh Kumar 		}
14299591becbSViresh Kumar 	} else if (cpufreq_driver->stop_cpu) {
1430367dc4aaSDirk Brandewie 		cpufreq_driver->stop_cpu(policy);
14319591becbSViresh Kumar 	}
1432cedb70afSSrivatsa S. Bhat }
1433cedb70afSSrivatsa S. Bhat 
143415c0b4d2SRafael J. Wysocki static void cpufreq_offline_finish(unsigned int cpu)
1435cedb70afSSrivatsa S. Bhat {
14369591becbSViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1437cedb70afSSrivatsa S. Bhat 
1438cedb70afSSrivatsa S. Bhat 	if (!policy) {
1439cedb70afSSrivatsa S. Bhat 		pr_debug("%s: No cpu_data found\n", __func__);
144015c0b4d2SRafael J. Wysocki 		return;
1441cedb70afSSrivatsa S. Bhat 	}
1442cedb70afSSrivatsa S. Bhat 
14439591becbSViresh Kumar 	/* Only proceed for inactive policies */
14449591becbSViresh Kumar 	if (!policy_is_inactive(policy))
144515c0b4d2SRafael J. Wysocki 		return;
144687549141SViresh Kumar 
144787549141SViresh Kumar 	/* If cpu is last user of policy, free policy */
144887549141SViresh Kumar 	if (has_target()) {
144915c0b4d2SRafael J. Wysocki 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1450559ed407SRafael J. Wysocki 		if (ret)
145187549141SViresh Kumar 			pr_err("%s: Failed to exit governor\n", __func__);
14523de9bdebSViresh Kumar 	}
14532a998599SRafael J. Wysocki 
14548414809cSSrivatsa S. Bhat 	/*
14558414809cSSrivatsa S. Bhat 	 * Perform the ->exit() even during light-weight tear-down,
14568414809cSSrivatsa S. Bhat 	 * since this is a core component, and is essential for the
14578414809cSSrivatsa S. Bhat 	 * subsequent light-weight ->init() to succeed.
14588414809cSSrivatsa S. Bhat 	 */
14591c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->exit)
14603a3e9e06SViresh Kumar 		cpufreq_driver->exit(policy);
14611da177e4SLinus Torvalds }
14621da177e4SLinus Torvalds 
1463cedb70afSSrivatsa S. Bhat /**
146427a862e9SViresh Kumar  * cpufreq_remove_dev - remove a CPU device
1465cedb70afSSrivatsa S. Bhat  *
1466cedb70afSSrivatsa S. Bhat  * Removes the cpufreq interface for a CPU device.
1467cedb70afSSrivatsa S. Bhat  */
14688a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
14695a01f2e8SVenkatesh Pallipadi {
14708a25a2fdSKay Sievers 	unsigned int cpu = dev->id;
147187549141SViresh Kumar 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
147287549141SViresh Kumar 
147387549141SViresh Kumar 	if (!policy)
1474ec28297aSVenki Pallipadi 		return 0;
1475ec28297aSVenki Pallipadi 
1476559ed407SRafael J. Wysocki 	if (cpu_online(cpu)) {
147715c0b4d2SRafael J. Wysocki 		cpufreq_offline_prepare(cpu);
147815c0b4d2SRafael J. Wysocki 		cpufreq_offline_finish(cpu);
147987549141SViresh Kumar 	}
148087549141SViresh Kumar 
1481559ed407SRafael J. Wysocki 	cpumask_clear_cpu(cpu, policy->real_cpus);
1482559ed407SRafael J. Wysocki 
1483559ed407SRafael J. Wysocki 	if (cpumask_empty(policy->real_cpus)) {
14843654c5ccSViresh Kumar 		cpufreq_policy_free(policy, true);
148587549141SViresh Kumar 		return 0;
148687549141SViresh Kumar 	}
148787549141SViresh Kumar 
1488559ed407SRafael J. Wysocki 	if (cpu != policy->kobj_cpu) {
1489559ed407SRafael J. Wysocki 		remove_cpu_dev_symlink(policy, cpu);
1490559ed407SRafael J. Wysocki 	} else {
1491559ed407SRafael J. Wysocki 		/*
1492559ed407SRafael J. Wysocki 		 * The CPU owning the policy object is going away.  Move it to
1493559ed407SRafael J. Wysocki 		 * another suitable CPU.
1494559ed407SRafael J. Wysocki 		 */
1495559ed407SRafael J. Wysocki 		unsigned int new_cpu = cpumask_first(policy->real_cpus);
1496559ed407SRafael J. Wysocki 		struct device *new_dev = get_cpu_device(new_cpu);
149727a862e9SViresh Kumar 
1498559ed407SRafael J. Wysocki 		dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
149927a862e9SViresh Kumar 
1500559ed407SRafael J. Wysocki 		sysfs_remove_link(&new_dev->kobj, "cpufreq");
1501559ed407SRafael J. Wysocki 		policy->kobj_cpu = new_cpu;
1502559ed407SRafael J. Wysocki 		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1503559ed407SRafael J. Wysocki 	}
1504559ed407SRafael J. Wysocki 
1505559ed407SRafael J. Wysocki 	return 0;
15065a01f2e8SVenkatesh Pallipadi }
15075a01f2e8SVenkatesh Pallipadi 
150865f27f38SDavid Howells static void handle_update(struct work_struct *work)
15091da177e4SLinus Torvalds {
151065f27f38SDavid Howells 	struct cpufreq_policy *policy =
151165f27f38SDavid Howells 		container_of(work, struct cpufreq_policy, update);
151265f27f38SDavid Howells 	unsigned int cpu = policy->cpu;
15132d06d8c4SDominik Brodowski 	pr_debug("handle_update for cpu %u called\n", cpu);
15141da177e4SLinus Torvalds 	cpufreq_update_policy(cpu);
15151da177e4SLinus Torvalds }
15161da177e4SLinus Torvalds 
15171da177e4SLinus Torvalds /**
1518bb176f7dSViresh Kumar  *	cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1519bb176f7dSViresh Kumar  *	in deep trouble.
1520a1e1dc41SViresh Kumar  *	@policy: policy managing CPUs
15211da177e4SLinus Torvalds  *	@new_freq: CPU frequency the CPU actually runs at
15221da177e4SLinus Torvalds  *
152329464f28SDave Jones  *	We adjust to current frequency first, and need to clean up later.
152429464f28SDave Jones  *	So either call to cpufreq_update_policy() or schedule handle_update()).
15251da177e4SLinus Torvalds  */
1526a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1527e08f5f5bSGautham R Shenoy 				unsigned int new_freq)
15281da177e4SLinus Torvalds {
15291da177e4SLinus Torvalds 	struct cpufreq_freqs freqs;
1530b43a7ffbSViresh Kumar 
1531e837f9b5SJoe Perches 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1532a1e1dc41SViresh Kumar 		 policy->cur, new_freq);
15331da177e4SLinus Torvalds 
1534a1e1dc41SViresh Kumar 	freqs.old = policy->cur;
15351da177e4SLinus Torvalds 	freqs.new = new_freq;
1536b43a7ffbSViresh Kumar 
15378fec051eSViresh Kumar 	cpufreq_freq_transition_begin(policy, &freqs);
15388fec051eSViresh Kumar 	cpufreq_freq_transition_end(policy, &freqs, 0);
15391da177e4SLinus Torvalds }
15401da177e4SLinus Torvalds 
15411da177e4SLinus Torvalds /**
15424ab70df4SDhaval Giani  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
154395235ca2SVenkatesh Pallipadi  * @cpu: CPU number
154495235ca2SVenkatesh Pallipadi  *
154595235ca2SVenkatesh Pallipadi  * This is the last known freq, without actually getting it from the driver.
154695235ca2SVenkatesh Pallipadi  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
154795235ca2SVenkatesh Pallipadi  */
154895235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu)
154995235ca2SVenkatesh Pallipadi {
15509e21ba8bSDirk Brandewie 	struct cpufreq_policy *policy;
1551e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
155295235ca2SVenkatesh Pallipadi 
15531c3d85ddSRafael J. Wysocki 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
15541c3d85ddSRafael J. Wysocki 		return cpufreq_driver->get(cpu);
15559e21ba8bSDirk Brandewie 
15569e21ba8bSDirk Brandewie 	policy = cpufreq_cpu_get(cpu);
155795235ca2SVenkatesh Pallipadi 	if (policy) {
1558e08f5f5bSGautham R Shenoy 		ret_freq = policy->cur;
155995235ca2SVenkatesh Pallipadi 		cpufreq_cpu_put(policy);
156095235ca2SVenkatesh Pallipadi 	}
156195235ca2SVenkatesh Pallipadi 
15624d34a67dSDave Jones 	return ret_freq;
156395235ca2SVenkatesh Pallipadi }
156495235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get);
156595235ca2SVenkatesh Pallipadi 
15663d737108SJesse Barnes /**
15673d737108SJesse Barnes  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
15683d737108SJesse Barnes  * @cpu: CPU number
15693d737108SJesse Barnes  *
15703d737108SJesse Barnes  * Just return the max possible frequency for a given CPU.
15713d737108SJesse Barnes  */
15723d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu)
15733d737108SJesse Barnes {
15743d737108SJesse Barnes 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
15753d737108SJesse Barnes 	unsigned int ret_freq = 0;
15763d737108SJesse Barnes 
15773d737108SJesse Barnes 	if (policy) {
15783d737108SJesse Barnes 		ret_freq = policy->max;
15793d737108SJesse Barnes 		cpufreq_cpu_put(policy);
15803d737108SJesse Barnes 	}
15813d737108SJesse Barnes 
15823d737108SJesse Barnes 	return ret_freq;
15833d737108SJesse Barnes }
15843d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max);
15853d737108SJesse Barnes 
1586d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
15871da177e4SLinus Torvalds {
1588e08f5f5bSGautham R Shenoy 	unsigned int ret_freq = 0;
15891da177e4SLinus Torvalds 
15901c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver->get)
15914d34a67dSDave Jones 		return ret_freq;
15921da177e4SLinus Torvalds 
1593d92d50a4SViresh Kumar 	ret_freq = cpufreq_driver->get(policy->cpu);
15941da177e4SLinus Torvalds 
159511e584cfSViresh Kumar 	/* Updating inactive policies is invalid, so avoid doing that. */
159611e584cfSViresh Kumar 	if (unlikely(policy_is_inactive(policy)))
159711e584cfSViresh Kumar 		return ret_freq;
159811e584cfSViresh Kumar 
1599e08f5f5bSGautham R Shenoy 	if (ret_freq && policy->cur &&
16001c3d85ddSRafael J. Wysocki 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1601e08f5f5bSGautham R Shenoy 		/* verify no discrepancy between actual and
1602e08f5f5bSGautham R Shenoy 					saved value exists */
1603e08f5f5bSGautham R Shenoy 		if (unlikely(ret_freq != policy->cur)) {
1604a1e1dc41SViresh Kumar 			cpufreq_out_of_sync(policy, ret_freq);
16051da177e4SLinus Torvalds 			schedule_work(&policy->update);
16061da177e4SLinus Torvalds 		}
16071da177e4SLinus Torvalds 	}
16081da177e4SLinus Torvalds 
16094d34a67dSDave Jones 	return ret_freq;
16105a01f2e8SVenkatesh Pallipadi }
16111da177e4SLinus Torvalds 
16125a01f2e8SVenkatesh Pallipadi /**
16135a01f2e8SVenkatesh Pallipadi  * cpufreq_get - get the current CPU frequency (in kHz)
16145a01f2e8SVenkatesh Pallipadi  * @cpu: CPU number
16155a01f2e8SVenkatesh Pallipadi  *
16165a01f2e8SVenkatesh Pallipadi  * Get the CPU current (static) CPU frequency
16175a01f2e8SVenkatesh Pallipadi  */
16185a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu)
16195a01f2e8SVenkatesh Pallipadi {
1620999976e0SAaron Plattner 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
16215a01f2e8SVenkatesh Pallipadi 	unsigned int ret_freq = 0;
16225a01f2e8SVenkatesh Pallipadi 
1623999976e0SAaron Plattner 	if (policy) {
1624ad7722daSviresh kumar 		down_read(&policy->rwsem);
1625d92d50a4SViresh Kumar 		ret_freq = __cpufreq_get(policy);
1626ad7722daSviresh kumar 		up_read(&policy->rwsem);
1627999976e0SAaron Plattner 
1628999976e0SAaron Plattner 		cpufreq_cpu_put(policy);
1629999976e0SAaron Plattner 	}
16306eed9404SViresh Kumar 
16314d34a67dSDave Jones 	return ret_freq;
16321da177e4SLinus Torvalds }
16331da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get);
16341da177e4SLinus Torvalds 
16358a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = {
16368a25a2fdSKay Sievers 	.name		= "cpufreq",
16378a25a2fdSKay Sievers 	.subsys		= &cpu_subsys,
16388a25a2fdSKay Sievers 	.add_dev	= cpufreq_add_dev,
16398a25a2fdSKay Sievers 	.remove_dev	= cpufreq_remove_dev,
1640e00e56dfSRafael J. Wysocki };
1641e00e56dfSRafael J. Wysocki 
1642e28867eaSViresh Kumar /*
1643e28867eaSViresh Kumar  * In case platform wants some specific frequency to be configured
1644e28867eaSViresh Kumar  * during suspend..
164542d4dc3fSBenjamin Herrenschmidt  */
1646e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy)
164742d4dc3fSBenjamin Herrenschmidt {
1648e28867eaSViresh Kumar 	int ret;
16494bc5d341SDave Jones 
1650e28867eaSViresh Kumar 	if (!policy->suspend_freq) {
1651e28867eaSViresh Kumar 		pr_err("%s: suspend_freq can't be zero\n", __func__);
1652e28867eaSViresh Kumar 		return -EINVAL;
165342d4dc3fSBenjamin Herrenschmidt 	}
165442d4dc3fSBenjamin Herrenschmidt 
1655e28867eaSViresh Kumar 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1656e28867eaSViresh Kumar 			policy->suspend_freq);
1657e28867eaSViresh Kumar 
1658e28867eaSViresh Kumar 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1659e28867eaSViresh Kumar 			CPUFREQ_RELATION_H);
1660e28867eaSViresh Kumar 	if (ret)
1661e28867eaSViresh Kumar 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1662e28867eaSViresh Kumar 				__func__, policy->suspend_freq, ret);
1663e28867eaSViresh Kumar 
1664c9060494SDave Jones 	return ret;
166542d4dc3fSBenjamin Herrenschmidt }
1666e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend);
166742d4dc3fSBenjamin Herrenschmidt 
166842d4dc3fSBenjamin Herrenschmidt /**
16692f0aea93SViresh Kumar  * cpufreq_suspend() - Suspend CPUFreq governors
16701da177e4SLinus Torvalds  *
16712f0aea93SViresh Kumar  * Called during system wide Suspend/Hibernate cycles for suspending governors
16722f0aea93SViresh Kumar  * as some platforms can't change frequency after this point in suspend cycle.
16732f0aea93SViresh Kumar  * Because some of the devices (like: i2c, regulators, etc) they use for
16742f0aea93SViresh Kumar  * changing frequency are suspended quickly after this point.
16751da177e4SLinus Torvalds  */
16762f0aea93SViresh Kumar void cpufreq_suspend(void)
16771da177e4SLinus Torvalds {
16783a3e9e06SViresh Kumar 	struct cpufreq_policy *policy;
16791da177e4SLinus Torvalds 
16802f0aea93SViresh Kumar 	if (!cpufreq_driver)
1681e00e56dfSRafael J. Wysocki 		return;
16821da177e4SLinus Torvalds 
16832f0aea93SViresh Kumar 	if (!has_target())
1684b1b12babSViresh Kumar 		goto suspend;
16851da177e4SLinus Torvalds 
16862f0aea93SViresh Kumar 	pr_debug("%s: Suspending Governors\n", __func__);
16872f0aea93SViresh Kumar 
1688f963735aSViresh Kumar 	for_each_active_policy(policy) {
16892f0aea93SViresh Kumar 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
16902f0aea93SViresh Kumar 			pr_err("%s: Failed to stop governor for policy: %p\n",
16912f0aea93SViresh Kumar 				__func__, policy);
16922f0aea93SViresh Kumar 		else if (cpufreq_driver->suspend
16932f0aea93SViresh Kumar 		    && cpufreq_driver->suspend(policy))
16942f0aea93SViresh Kumar 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
16952f0aea93SViresh Kumar 				policy);
16961da177e4SLinus Torvalds 	}
1697b1b12babSViresh Kumar 
1698b1b12babSViresh Kumar suspend:
1699b1b12babSViresh Kumar 	cpufreq_suspended = true;
17001da177e4SLinus Torvalds }
17011da177e4SLinus Torvalds 
17021da177e4SLinus Torvalds /**
17032f0aea93SViresh Kumar  * cpufreq_resume() - Resume CPUFreq governors
17041da177e4SLinus Torvalds  *
17052f0aea93SViresh Kumar  * Called during system wide Suspend/Hibernate cycle for resuming governors that
17062f0aea93SViresh Kumar  * are suspended with cpufreq_suspend().
17071da177e4SLinus Torvalds  */
17082f0aea93SViresh Kumar void cpufreq_resume(void)
17091da177e4SLinus Torvalds {
17101da177e4SLinus Torvalds 	struct cpufreq_policy *policy;
17111da177e4SLinus Torvalds 
17122f0aea93SViresh Kumar 	if (!cpufreq_driver)
17131da177e4SLinus Torvalds 		return;
17141da177e4SLinus Torvalds 
17158e30444eSLan Tianyu 	cpufreq_suspended = false;
17168e30444eSLan Tianyu 
17172f0aea93SViresh Kumar 	if (!has_target())
17182f0aea93SViresh Kumar 		return;
17191da177e4SLinus Torvalds 
17202f0aea93SViresh Kumar 	pr_debug("%s: Resuming Governors\n", __func__);
17212f0aea93SViresh Kumar 
1722f963735aSViresh Kumar 	for_each_active_policy(policy) {
17230c5aa405SViresh Kumar 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
17240c5aa405SViresh Kumar 			pr_err("%s: Failed to resume driver: %p\n", __func__,
17250c5aa405SViresh Kumar 				policy);
17260c5aa405SViresh Kumar 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
17272f0aea93SViresh Kumar 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
17282f0aea93SViresh Kumar 			pr_err("%s: Failed to start governor for policy: %p\n",
17292f0aea93SViresh Kumar 				__func__, policy);
1730c75de0acSViresh Kumar 	}
17312f0aea93SViresh Kumar 
17322f0aea93SViresh Kumar 	/*
1733c75de0acSViresh Kumar 	 * schedule call cpufreq_update_policy() for first-online CPU, as that
1734c75de0acSViresh Kumar 	 * wouldn't be hotplugged-out on suspend. It will verify that the
1735c75de0acSViresh Kumar 	 * current freq is in sync with what we believe it to be.
17362f0aea93SViresh Kumar 	 */
1737c75de0acSViresh Kumar 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1738c75de0acSViresh Kumar 	if (WARN_ON(!policy))
1739c75de0acSViresh Kumar 		return;
1740c75de0acSViresh Kumar 
17413a3e9e06SViresh Kumar 	schedule_work(&policy->update);
17421da177e4SLinus Torvalds }
17431da177e4SLinus Torvalds 
17449d95046eSBorislav Petkov /**
17459d95046eSBorislav Petkov  *	cpufreq_get_current_driver - return current driver's name
17469d95046eSBorislav Petkov  *
17479d95046eSBorislav Petkov  *	Return the name string of the currently loaded cpufreq driver
17489d95046eSBorislav Petkov  *	or NULL, if none.
17499d95046eSBorislav Petkov  */
17509d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void)
17519d95046eSBorislav Petkov {
17521c3d85ddSRafael J. Wysocki 	if (cpufreq_driver)
17531c3d85ddSRafael J. Wysocki 		return cpufreq_driver->name;
17541c3d85ddSRafael J. Wysocki 
17551c3d85ddSRafael J. Wysocki 	return NULL;
17569d95046eSBorislav Petkov }
17579d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
17581da177e4SLinus Torvalds 
175951315cdfSThomas Petazzoni /**
176051315cdfSThomas Petazzoni  *	cpufreq_get_driver_data - return current driver data
176151315cdfSThomas Petazzoni  *
176251315cdfSThomas Petazzoni  *	Return the private data of the currently loaded cpufreq
176351315cdfSThomas Petazzoni  *	driver, or NULL if no cpufreq driver is loaded.
176451315cdfSThomas Petazzoni  */
176551315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void)
176651315cdfSThomas Petazzoni {
176751315cdfSThomas Petazzoni 	if (cpufreq_driver)
176851315cdfSThomas Petazzoni 		return cpufreq_driver->driver_data;
176951315cdfSThomas Petazzoni 
177051315cdfSThomas Petazzoni 	return NULL;
177151315cdfSThomas Petazzoni }
177251315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
177351315cdfSThomas Petazzoni 
17741da177e4SLinus Torvalds /*********************************************************************
17751da177e4SLinus Torvalds  *                     NOTIFIER LISTS INTERFACE                      *
17761da177e4SLinus Torvalds  *********************************************************************/
17771da177e4SLinus Torvalds 
17781da177e4SLinus Torvalds /**
17791da177e4SLinus Torvalds  *	cpufreq_register_notifier - register a driver with cpufreq
17801da177e4SLinus Torvalds  *	@nb: notifier function to register
17811da177e4SLinus Torvalds  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
17821da177e4SLinus Torvalds  *
17831da177e4SLinus Torvalds  *	Add a driver to one of two lists: either a list of drivers that
17841da177e4SLinus Torvalds  *      are notified about clock rate changes (once before and once after
17851da177e4SLinus Torvalds  *      the transition), or a list of drivers that are notified about
17861da177e4SLinus Torvalds  *      changes in cpufreq policy.
17871da177e4SLinus Torvalds  *
17881da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1789e041c683SAlan Stern  *	blocking_notifier_chain_register.
17901da177e4SLinus Torvalds  */
17911da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
17921da177e4SLinus Torvalds {
17931da177e4SLinus Torvalds 	int ret;
17941da177e4SLinus Torvalds 
1795d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1796d5aaffa9SDirk Brandewie 		return -EINVAL;
1797d5aaffa9SDirk Brandewie 
179874212ca4SCesar Eduardo Barros 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
179974212ca4SCesar Eduardo Barros 
18001da177e4SLinus Torvalds 	switch (list) {
18011da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1802b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_register(
1803e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18041da177e4SLinus Torvalds 		break;
18051da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1806e041c683SAlan Stern 		ret = blocking_notifier_chain_register(
1807e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18081da177e4SLinus Torvalds 		break;
18091da177e4SLinus Torvalds 	default:
18101da177e4SLinus Torvalds 		ret = -EINVAL;
18111da177e4SLinus Torvalds 	}
18121da177e4SLinus Torvalds 
18131da177e4SLinus Torvalds 	return ret;
18141da177e4SLinus Torvalds }
18151da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier);
18161da177e4SLinus Torvalds 
18171da177e4SLinus Torvalds /**
18181da177e4SLinus Torvalds  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
18191da177e4SLinus Torvalds  *	@nb: notifier block to be unregistered
18201da177e4SLinus Torvalds  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
18211da177e4SLinus Torvalds  *
18221da177e4SLinus Torvalds  *	Remove a driver from the CPU frequency notifier list.
18231da177e4SLinus Torvalds  *
18241da177e4SLinus Torvalds  *	This function may sleep, and has the same return conditions as
1825e041c683SAlan Stern  *	blocking_notifier_chain_unregister.
18261da177e4SLinus Torvalds  */
18271da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
18281da177e4SLinus Torvalds {
18291da177e4SLinus Torvalds 	int ret;
18301da177e4SLinus Torvalds 
1831d5aaffa9SDirk Brandewie 	if (cpufreq_disabled())
1832d5aaffa9SDirk Brandewie 		return -EINVAL;
1833d5aaffa9SDirk Brandewie 
18341da177e4SLinus Torvalds 	switch (list) {
18351da177e4SLinus Torvalds 	case CPUFREQ_TRANSITION_NOTIFIER:
1836b4dfdbb3SAlan Stern 		ret = srcu_notifier_chain_unregister(
1837e041c683SAlan Stern 				&cpufreq_transition_notifier_list, nb);
18381da177e4SLinus Torvalds 		break;
18391da177e4SLinus Torvalds 	case CPUFREQ_POLICY_NOTIFIER:
1840e041c683SAlan Stern 		ret = blocking_notifier_chain_unregister(
1841e041c683SAlan Stern 				&cpufreq_policy_notifier_list, nb);
18421da177e4SLinus Torvalds 		break;
18431da177e4SLinus Torvalds 	default:
18441da177e4SLinus Torvalds 		ret = -EINVAL;
18451da177e4SLinus Torvalds 	}
18461da177e4SLinus Torvalds 
18471da177e4SLinus Torvalds 	return ret;
18481da177e4SLinus Torvalds }
18491da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier);
18501da177e4SLinus Torvalds 
18511da177e4SLinus Torvalds 
18521da177e4SLinus Torvalds /*********************************************************************
18531da177e4SLinus Torvalds  *                              GOVERNORS                            *
18541da177e4SLinus Torvalds  *********************************************************************/
18551da177e4SLinus Torvalds 
18561c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */
18571c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy,
18581c03a2d0SViresh Kumar 				 struct cpufreq_freqs *freqs, int index)
18591c03a2d0SViresh Kumar {
18601c03a2d0SViresh Kumar 	int ret;
18611c03a2d0SViresh Kumar 
18621c03a2d0SViresh Kumar 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
18631c03a2d0SViresh Kumar 
18641c03a2d0SViresh Kumar 	/* We don't need to switch to intermediate freq */
18651c03a2d0SViresh Kumar 	if (!freqs->new)
18661c03a2d0SViresh Kumar 		return 0;
18671c03a2d0SViresh Kumar 
18681c03a2d0SViresh Kumar 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
18691c03a2d0SViresh Kumar 		 __func__, policy->cpu, freqs->old, freqs->new);
18701c03a2d0SViresh Kumar 
18711c03a2d0SViresh Kumar 	cpufreq_freq_transition_begin(policy, freqs);
18721c03a2d0SViresh Kumar 	ret = cpufreq_driver->target_intermediate(policy, index);
18731c03a2d0SViresh Kumar 	cpufreq_freq_transition_end(policy, freqs, ret);
18741c03a2d0SViresh Kumar 
18751c03a2d0SViresh Kumar 	if (ret)
18761c03a2d0SViresh Kumar 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
18771c03a2d0SViresh Kumar 		       __func__, ret);
18781c03a2d0SViresh Kumar 
18791c03a2d0SViresh Kumar 	return ret;
18801c03a2d0SViresh Kumar }
18811c03a2d0SViresh Kumar 
18828d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy,
18838d65775dSViresh Kumar 			  struct cpufreq_frequency_table *freq_table, int index)
18848d65775dSViresh Kumar {
18851c03a2d0SViresh Kumar 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
18861c03a2d0SViresh Kumar 	unsigned int intermediate_freq = 0;
18878d65775dSViresh Kumar 	int retval = -EINVAL;
18888d65775dSViresh Kumar 	bool notify;
18898d65775dSViresh Kumar 
18908d65775dSViresh Kumar 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
18918d65775dSViresh Kumar 	if (notify) {
18921c03a2d0SViresh Kumar 		/* Handle switching to intermediate frequency */
18931c03a2d0SViresh Kumar 		if (cpufreq_driver->get_intermediate) {
18941c03a2d0SViresh Kumar 			retval = __target_intermediate(policy, &freqs, index);
18951c03a2d0SViresh Kumar 			if (retval)
18961c03a2d0SViresh Kumar 				return retval;
18978d65775dSViresh Kumar 
18981c03a2d0SViresh Kumar 			intermediate_freq = freqs.new;
18991c03a2d0SViresh Kumar 			/* Set old freq to intermediate */
19001c03a2d0SViresh Kumar 			if (intermediate_freq)
19011c03a2d0SViresh Kumar 				freqs.old = freqs.new;
19021c03a2d0SViresh Kumar 		}
19031c03a2d0SViresh Kumar 
19041c03a2d0SViresh Kumar 		freqs.new = freq_table[index].frequency;
19058d65775dSViresh Kumar 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
19068d65775dSViresh Kumar 			 __func__, policy->cpu, freqs.old, freqs.new);
19078d65775dSViresh Kumar 
19088d65775dSViresh Kumar 		cpufreq_freq_transition_begin(policy, &freqs);
19098d65775dSViresh Kumar 	}
19108d65775dSViresh Kumar 
19118d65775dSViresh Kumar 	retval = cpufreq_driver->target_index(policy, index);
19128d65775dSViresh Kumar 	if (retval)
19138d65775dSViresh Kumar 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
19148d65775dSViresh Kumar 		       retval);
19158d65775dSViresh Kumar 
19161c03a2d0SViresh Kumar 	if (notify) {
19178d65775dSViresh Kumar 		cpufreq_freq_transition_end(policy, &freqs, retval);
19188d65775dSViresh Kumar 
19191c03a2d0SViresh Kumar 		/*
19201c03a2d0SViresh Kumar 		 * Failed after setting to intermediate freq? Driver should have
19211c03a2d0SViresh Kumar 		 * reverted back to initial frequency and so should we. Check
19221c03a2d0SViresh Kumar 		 * here for intermediate_freq instead of get_intermediate, in
192358405af6SShailendra Verma 		 * case we haven't switched to intermediate freq at all.
19241c03a2d0SViresh Kumar 		 */
19251c03a2d0SViresh Kumar 		if (unlikely(retval && intermediate_freq)) {
19261c03a2d0SViresh Kumar 			freqs.old = intermediate_freq;
19271c03a2d0SViresh Kumar 			freqs.new = policy->restore_freq;
19281c03a2d0SViresh Kumar 			cpufreq_freq_transition_begin(policy, &freqs);
19291c03a2d0SViresh Kumar 			cpufreq_freq_transition_end(policy, &freqs, 0);
19301c03a2d0SViresh Kumar 		}
19311c03a2d0SViresh Kumar 	}
19321c03a2d0SViresh Kumar 
19338d65775dSViresh Kumar 	return retval;
19348d65775dSViresh Kumar }
19358d65775dSViresh Kumar 
19361da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy,
19371da177e4SLinus Torvalds 			    unsigned int target_freq,
19381da177e4SLinus Torvalds 			    unsigned int relation)
19391da177e4SLinus Torvalds {
19407249924eSViresh Kumar 	unsigned int old_target_freq = target_freq;
19418d65775dSViresh Kumar 	int retval = -EINVAL;
1942c32b6b8eSAshok Raj 
1943a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
1944a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
1945a7b422cdSKonrad Rzeszutek Wilk 
19467249924eSViresh Kumar 	/* Make sure that target_freq is within supported range */
19477249924eSViresh Kumar 	if (target_freq > policy->max)
19487249924eSViresh Kumar 		target_freq = policy->max;
19497249924eSViresh Kumar 	if (target_freq < policy->min)
19507249924eSViresh Kumar 		target_freq = policy->min;
19517249924eSViresh Kumar 
19527249924eSViresh Kumar 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
19537249924eSViresh Kumar 		 policy->cpu, target_freq, relation, old_target_freq);
19545a1c0228SViresh Kumar 
19559c0ebcf7SViresh Kumar 	/*
19569c0ebcf7SViresh Kumar 	 * This might look like a redundant call as we are checking it again
19579c0ebcf7SViresh Kumar 	 * after finding index. But it is left intentionally for cases where
19589c0ebcf7SViresh Kumar 	 * exactly same freq is called again and so we can save on few function
19599c0ebcf7SViresh Kumar 	 * calls.
19609c0ebcf7SViresh Kumar 	 */
19615a1c0228SViresh Kumar 	if (target_freq == policy->cur)
19625a1c0228SViresh Kumar 		return 0;
19635a1c0228SViresh Kumar 
19641c03a2d0SViresh Kumar 	/* Save last value to restore later on errors */
19651c03a2d0SViresh Kumar 	policy->restore_freq = policy->cur;
19661c03a2d0SViresh Kumar 
19671c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->target)
19681c3d85ddSRafael J. Wysocki 		retval = cpufreq_driver->target(policy, target_freq, relation);
19699c0ebcf7SViresh Kumar 	else if (cpufreq_driver->target_index) {
19709c0ebcf7SViresh Kumar 		struct cpufreq_frequency_table *freq_table;
19719c0ebcf7SViresh Kumar 		int index;
197290d45d17SAshok Raj 
19739c0ebcf7SViresh Kumar 		freq_table = cpufreq_frequency_get_table(policy->cpu);
19749c0ebcf7SViresh Kumar 		if (unlikely(!freq_table)) {
19759c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find freq_table\n", __func__);
19769c0ebcf7SViresh Kumar 			goto out;
19779c0ebcf7SViresh Kumar 		}
19789c0ebcf7SViresh Kumar 
19799c0ebcf7SViresh Kumar 		retval = cpufreq_frequency_table_target(policy, freq_table,
19809c0ebcf7SViresh Kumar 				target_freq, relation, &index);
19819c0ebcf7SViresh Kumar 		if (unlikely(retval)) {
19829c0ebcf7SViresh Kumar 			pr_err("%s: Unable to find matching freq\n", __func__);
19839c0ebcf7SViresh Kumar 			goto out;
19849c0ebcf7SViresh Kumar 		}
19859c0ebcf7SViresh Kumar 
1986d4019f0aSViresh Kumar 		if (freq_table[index].frequency == policy->cur) {
19879c0ebcf7SViresh Kumar 			retval = 0;
1988d4019f0aSViresh Kumar 			goto out;
1989d4019f0aSViresh Kumar 		}
1990d4019f0aSViresh Kumar 
19918d65775dSViresh Kumar 		retval = __target_index(policy, freq_table, index);
19929c0ebcf7SViresh Kumar 	}
19939c0ebcf7SViresh Kumar 
19949c0ebcf7SViresh Kumar out:
19951da177e4SLinus Torvalds 	return retval;
19961da177e4SLinus Torvalds }
19971da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
19981da177e4SLinus Torvalds 
19991da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy,
20001da177e4SLinus Torvalds 			  unsigned int target_freq,
20011da177e4SLinus Torvalds 			  unsigned int relation)
20021da177e4SLinus Torvalds {
2003f1829e4aSJulia Lawall 	int ret = -EINVAL;
20041da177e4SLinus Torvalds 
2005ad7722daSviresh kumar 	down_write(&policy->rwsem);
20061da177e4SLinus Torvalds 
20071da177e4SLinus Torvalds 	ret = __cpufreq_driver_target(policy, target_freq, relation);
20081da177e4SLinus Torvalds 
2009ad7722daSviresh kumar 	up_write(&policy->rwsem);
20101da177e4SLinus Torvalds 
20111da177e4SLinus Torvalds 	return ret;
20121da177e4SLinus Torvalds }
20131da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target);
20141da177e4SLinus Torvalds 
2015e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy,
2016e08f5f5bSGautham R Shenoy 					unsigned int event)
20171da177e4SLinus Torvalds {
2018cc993cabSDave Jones 	int ret;
20196afde10cSThomas Renninger 
20206afde10cSThomas Renninger 	/* Only must be defined when default governor is known to have latency
20216afde10cSThomas Renninger 	   restrictions, like e.g. conservative or ondemand.
20226afde10cSThomas Renninger 	   That this is the case is already ensured in Kconfig
20236afde10cSThomas Renninger 	*/
20246afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
20256afde10cSThomas Renninger 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
20266afde10cSThomas Renninger #else
20276afde10cSThomas Renninger 	struct cpufreq_governor *gov = NULL;
20286afde10cSThomas Renninger #endif
20291c256245SThomas Renninger 
20302f0aea93SViresh Kumar 	/* Don't start any governor operations if we are entering suspend */
20312f0aea93SViresh Kumar 	if (cpufreq_suspended)
20322f0aea93SViresh Kumar 		return 0;
2033cb57720bSEthan Zhao 	/*
2034cb57720bSEthan Zhao 	 * Governor might not be initiated here if ACPI _PPC changed
2035cb57720bSEthan Zhao 	 * notification happened, so check it.
2036cb57720bSEthan Zhao 	 */
2037cb57720bSEthan Zhao 	if (!policy->governor)
2038cb57720bSEthan Zhao 		return -EINVAL;
20392f0aea93SViresh Kumar 
20401c256245SThomas Renninger 	if (policy->governor->max_transition_latency &&
20411c256245SThomas Renninger 	    policy->cpuinfo.transition_latency >
20421c256245SThomas Renninger 	    policy->governor->max_transition_latency) {
20436afde10cSThomas Renninger 		if (!gov)
20446afde10cSThomas Renninger 			return -EINVAL;
20456afde10cSThomas Renninger 		else {
2046e837f9b5SJoe Perches 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2047e837f9b5SJoe Perches 				policy->governor->name, gov->name);
20481c256245SThomas Renninger 			policy->governor = gov;
20491c256245SThomas Renninger 		}
20506afde10cSThomas Renninger 	}
20511da177e4SLinus Torvalds 
2052fe492f3fSViresh Kumar 	if (event == CPUFREQ_GOV_POLICY_INIT)
20531da177e4SLinus Torvalds 		if (!try_module_get(policy->governor->owner))
20541da177e4SLinus Torvalds 			return -EINVAL;
20551da177e4SLinus Torvalds 
20562d06d8c4SDominik Brodowski 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2057e08f5f5bSGautham R Shenoy 		 policy->cpu, event);
205895731ebbSXiaoguang Chen 
205995731ebbSXiaoguang Chen 	mutex_lock(&cpufreq_governor_lock);
206056d07db2SSrivatsa S. Bhat 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2061f73d3933SViresh Kumar 	    || (!policy->governor_enabled
2062f73d3933SViresh Kumar 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
206395731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
206495731ebbSXiaoguang Chen 		return -EBUSY;
206595731ebbSXiaoguang Chen 	}
206695731ebbSXiaoguang Chen 
206795731ebbSXiaoguang Chen 	if (event == CPUFREQ_GOV_STOP)
206895731ebbSXiaoguang Chen 		policy->governor_enabled = false;
206995731ebbSXiaoguang Chen 	else if (event == CPUFREQ_GOV_START)
207095731ebbSXiaoguang Chen 		policy->governor_enabled = true;
207195731ebbSXiaoguang Chen 
207295731ebbSXiaoguang Chen 	mutex_unlock(&cpufreq_governor_lock);
207395731ebbSXiaoguang Chen 
20741da177e4SLinus Torvalds 	ret = policy->governor->governor(policy, event);
20751da177e4SLinus Torvalds 
20764d5dcc42SViresh Kumar 	if (!ret) {
20774d5dcc42SViresh Kumar 		if (event == CPUFREQ_GOV_POLICY_INIT)
20788e53695fSViresh Kumar 			policy->governor->initialized++;
20794d5dcc42SViresh Kumar 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
20808e53695fSViresh Kumar 			policy->governor->initialized--;
208195731ebbSXiaoguang Chen 	} else {
208295731ebbSXiaoguang Chen 		/* Restore original values */
208395731ebbSXiaoguang Chen 		mutex_lock(&cpufreq_governor_lock);
208495731ebbSXiaoguang Chen 		if (event == CPUFREQ_GOV_STOP)
208595731ebbSXiaoguang Chen 			policy->governor_enabled = true;
208695731ebbSXiaoguang Chen 		else if (event == CPUFREQ_GOV_START)
208795731ebbSXiaoguang Chen 			policy->governor_enabled = false;
208895731ebbSXiaoguang Chen 		mutex_unlock(&cpufreq_governor_lock);
20894d5dcc42SViresh Kumar 	}
2090b394058fSViresh Kumar 
2091fe492f3fSViresh Kumar 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2092fe492f3fSViresh Kumar 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
20931da177e4SLinus Torvalds 		module_put(policy->governor->owner);
20941da177e4SLinus Torvalds 
20951da177e4SLinus Torvalds 	return ret;
20961da177e4SLinus Torvalds }
20971da177e4SLinus Torvalds 
20981da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor)
20991da177e4SLinus Torvalds {
21003bcb09a3SJeremy Fitzhardinge 	int err;
21011da177e4SLinus Torvalds 
21021da177e4SLinus Torvalds 	if (!governor)
21031da177e4SLinus Torvalds 		return -EINVAL;
21041da177e4SLinus Torvalds 
2105a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2106a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2107a7b422cdSKonrad Rzeszutek Wilk 
21083fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21091da177e4SLinus Torvalds 
2110b394058fSViresh Kumar 	governor->initialized = 0;
21113bcb09a3SJeremy Fitzhardinge 	err = -EBUSY;
211242f91fa1SViresh Kumar 	if (!find_governor(governor->name)) {
21133bcb09a3SJeremy Fitzhardinge 		err = 0;
21141da177e4SLinus Torvalds 		list_add(&governor->governor_list, &cpufreq_governor_list);
21153bcb09a3SJeremy Fitzhardinge 	}
21161da177e4SLinus Torvalds 
21173fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21183bcb09a3SJeremy Fitzhardinge 	return err;
21191da177e4SLinus Torvalds }
21201da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor);
21211da177e4SLinus Torvalds 
21221da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor)
21231da177e4SLinus Torvalds {
21244573237bSViresh Kumar 	struct cpufreq_policy *policy;
21254573237bSViresh Kumar 	unsigned long flags;
212690e41bacSPrarit Bhargava 
21271da177e4SLinus Torvalds 	if (!governor)
21281da177e4SLinus Torvalds 		return;
21291da177e4SLinus Torvalds 
2130a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2131a7b422cdSKonrad Rzeszutek Wilk 		return;
2132a7b422cdSKonrad Rzeszutek Wilk 
21334573237bSViresh Kumar 	/* clear last_governor for all inactive policies */
21344573237bSViresh Kumar 	read_lock_irqsave(&cpufreq_driver_lock, flags);
21354573237bSViresh Kumar 	for_each_inactive_policy(policy) {
213618bf3a12SViresh Kumar 		if (!strcmp(policy->last_governor, governor->name)) {
213718bf3a12SViresh Kumar 			policy->governor = NULL;
21384573237bSViresh Kumar 			strcpy(policy->last_governor, "\0");
213990e41bacSPrarit Bhargava 		}
214018bf3a12SViresh Kumar 	}
21414573237bSViresh Kumar 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
214290e41bacSPrarit Bhargava 
21433fc54d37Sakpm@osdl.org 	mutex_lock(&cpufreq_governor_mutex);
21441da177e4SLinus Torvalds 	list_del(&governor->governor_list);
21453fc54d37Sakpm@osdl.org 	mutex_unlock(&cpufreq_governor_mutex);
21461da177e4SLinus Torvalds 	return;
21471da177e4SLinus Torvalds }
21481da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
21491da177e4SLinus Torvalds 
21501da177e4SLinus Torvalds 
21511da177e4SLinus Torvalds /*********************************************************************
21521da177e4SLinus Torvalds  *                          POLICY INTERFACE                         *
21531da177e4SLinus Torvalds  *********************************************************************/
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds /**
21561da177e4SLinus Torvalds  * cpufreq_get_policy - get the current cpufreq_policy
215729464f28SDave Jones  * @policy: struct cpufreq_policy into which the current cpufreq_policy
215829464f28SDave Jones  *	is written
21591da177e4SLinus Torvalds  *
21601da177e4SLinus Torvalds  * Reads the current cpufreq policy.
21611da177e4SLinus Torvalds  */
21621da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
21631da177e4SLinus Torvalds {
21641da177e4SLinus Torvalds 	struct cpufreq_policy *cpu_policy;
21651da177e4SLinus Torvalds 	if (!policy)
21661da177e4SLinus Torvalds 		return -EINVAL;
21671da177e4SLinus Torvalds 
21681da177e4SLinus Torvalds 	cpu_policy = cpufreq_cpu_get(cpu);
21691da177e4SLinus Torvalds 	if (!cpu_policy)
21701da177e4SLinus Torvalds 		return -EINVAL;
21711da177e4SLinus Torvalds 
2172d5b73cd8SViresh Kumar 	memcpy(policy, cpu_policy, sizeof(*policy));
21731da177e4SLinus Torvalds 
21741da177e4SLinus Torvalds 	cpufreq_cpu_put(cpu_policy);
21751da177e4SLinus Torvalds 	return 0;
21761da177e4SLinus Torvalds }
21771da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy);
21781da177e4SLinus Torvalds 
2179153d7f3fSArjan van de Ven /*
2180037ce839SViresh Kumar  * policy : current policy.
2181037ce839SViresh Kumar  * new_policy: policy to be set.
2182153d7f3fSArjan van de Ven  */
2183037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy,
21843a3e9e06SViresh Kumar 				struct cpufreq_policy *new_policy)
21851da177e4SLinus Torvalds {
2186d9a789c7SRafael J. Wysocki 	struct cpufreq_governor *old_gov;
2187d9a789c7SRafael J. Wysocki 	int ret;
21881da177e4SLinus Torvalds 
2189e837f9b5SJoe Perches 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2190e837f9b5SJoe Perches 		 new_policy->cpu, new_policy->min, new_policy->max);
21911da177e4SLinus Torvalds 
2192d5b73cd8SViresh Kumar 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
21931da177e4SLinus Torvalds 
2194d9a789c7SRafael J. Wysocki 	if (new_policy->min > policy->max || new_policy->max < policy->min)
2195d9a789c7SRafael J. Wysocki 		return -EINVAL;
21969c9a43edSMattia Dongili 
21971da177e4SLinus Torvalds 	/* verify the cpu speed can be set within this limit */
21983a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
21991da177e4SLinus Torvalds 	if (ret)
2200d9a789c7SRafael J. Wysocki 		return ret;
22011da177e4SLinus Torvalds 
22021da177e4SLinus Torvalds 	/* adjust if necessary - all reasons */
2203e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22043a3e9e06SViresh Kumar 			CPUFREQ_ADJUST, new_policy);
22051da177e4SLinus Torvalds 
22061da177e4SLinus Torvalds 	/* adjust if necessary - hardware incompatibility*/
2207e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22083a3e9e06SViresh Kumar 			CPUFREQ_INCOMPATIBLE, new_policy);
22091da177e4SLinus Torvalds 
2210bb176f7dSViresh Kumar 	/*
2211bb176f7dSViresh Kumar 	 * verify the cpu speed can be set within this limit, which might be
2212bb176f7dSViresh Kumar 	 * different to the first one
2213bb176f7dSViresh Kumar 	 */
22143a3e9e06SViresh Kumar 	ret = cpufreq_driver->verify(new_policy);
2215e041c683SAlan Stern 	if (ret)
2216d9a789c7SRafael J. Wysocki 		return ret;
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds 	/* notification of the new policy */
2219e041c683SAlan Stern 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
22203a3e9e06SViresh Kumar 			CPUFREQ_NOTIFY, new_policy);
22211da177e4SLinus Torvalds 
22223a3e9e06SViresh Kumar 	policy->min = new_policy->min;
22233a3e9e06SViresh Kumar 	policy->max = new_policy->max;
22241da177e4SLinus Torvalds 
22252d06d8c4SDominik Brodowski 	pr_debug("new min and max freqs are %u - %u kHz\n",
22263a3e9e06SViresh Kumar 		 policy->min, policy->max);
22271da177e4SLinus Torvalds 
22281c3d85ddSRafael J. Wysocki 	if (cpufreq_driver->setpolicy) {
22293a3e9e06SViresh Kumar 		policy->policy = new_policy->policy;
22302d06d8c4SDominik Brodowski 		pr_debug("setting range\n");
2231d9a789c7SRafael J. Wysocki 		return cpufreq_driver->setpolicy(new_policy);
2232d9a789c7SRafael J. Wysocki 	}
2233d9a789c7SRafael J. Wysocki 
2234d9a789c7SRafael J. Wysocki 	if (new_policy->governor == policy->governor)
2235d9a789c7SRafael J. Wysocki 		goto out;
22361da177e4SLinus Torvalds 
22372d06d8c4SDominik Brodowski 	pr_debug("governor switch\n");
22381da177e4SLinus Torvalds 
2239d9a789c7SRafael J. Wysocki 	/* save old, working values */
2240d9a789c7SRafael J. Wysocki 	old_gov = policy->governor;
22411da177e4SLinus Torvalds 	/* end old governor */
2242d9a789c7SRafael J. Wysocki 	if (old_gov) {
22434bc384aeSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
22444bc384aeSViresh Kumar 		if (ret) {
22454bc384aeSViresh Kumar 			/* This can happen due to race with other operations */
22464bc384aeSViresh Kumar 			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
22474bc384aeSViresh Kumar 				 __func__, old_gov->name, ret);
22484bc384aeSViresh Kumar 			return ret;
22494bc384aeSViresh Kumar 		}
22504bc384aeSViresh Kumar 
2251ad7722daSviresh kumar 		up_write(&policy->rwsem);
22524bc384aeSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2253ad7722daSviresh kumar 		down_write(&policy->rwsem);
22544bc384aeSViresh Kumar 
22554bc384aeSViresh Kumar 		if (ret) {
22564bc384aeSViresh Kumar 			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
22574bc384aeSViresh Kumar 			       __func__, old_gov->name, ret);
22584bc384aeSViresh Kumar 			return ret;
22594bc384aeSViresh Kumar 		}
22607bd353a9SViresh Kumar 	}
22611da177e4SLinus Torvalds 
22621da177e4SLinus Torvalds 	/* start new governor */
22633a3e9e06SViresh Kumar 	policy->governor = new_policy->governor;
22644bc384aeSViresh Kumar 	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
22654bc384aeSViresh Kumar 	if (!ret) {
22664bc384aeSViresh Kumar 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
22674bc384aeSViresh Kumar 		if (!ret)
2268d9a789c7SRafael J. Wysocki 			goto out;
2269d9a789c7SRafael J. Wysocki 
2270ad7722daSviresh kumar 		up_write(&policy->rwsem);
2271d9a789c7SRafael J. Wysocki 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2272ad7722daSviresh kumar 		down_write(&policy->rwsem);
2273955ef483SViresh Kumar 	}
22747bd353a9SViresh Kumar 
22751da177e4SLinus Torvalds 	/* new governor failed, so re-start old one */
2276d9a789c7SRafael J. Wysocki 	pr_debug("starting governor %s failed\n", policy->governor->name);
22771da177e4SLinus Torvalds 	if (old_gov) {
22783a3e9e06SViresh Kumar 		policy->governor = old_gov;
22794bc384aeSViresh Kumar 		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
22804bc384aeSViresh Kumar 			policy->governor = NULL;
22814bc384aeSViresh Kumar 		else
2282d9a789c7SRafael J. Wysocki 			__cpufreq_governor(policy, CPUFREQ_GOV_START);
22831da177e4SLinus Torvalds 	}
22841da177e4SLinus Torvalds 
22854bc384aeSViresh Kumar 	return ret;
2286d9a789c7SRafael J. Wysocki 
2287d9a789c7SRafael J. Wysocki  out:
2288d9a789c7SRafael J. Wysocki 	pr_debug("governor: change or update limits\n");
2289d9a789c7SRafael J. Wysocki 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
22901da177e4SLinus Torvalds }
22911da177e4SLinus Torvalds 
22921da177e4SLinus Torvalds /**
22931da177e4SLinus Torvalds  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
22941da177e4SLinus Torvalds  *	@cpu: CPU which shall be re-evaluated
22951da177e4SLinus Torvalds  *
229625985edcSLucas De Marchi  *	Useful for policy notifiers which have different necessities
22971da177e4SLinus Torvalds  *	at different times.
22981da177e4SLinus Torvalds  */
22991da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu)
23001da177e4SLinus Torvalds {
23013a3e9e06SViresh Kumar 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
23023a3e9e06SViresh Kumar 	struct cpufreq_policy new_policy;
2303f1829e4aSJulia Lawall 	int ret;
23041da177e4SLinus Torvalds 
2305fefa8ff8SAaron Plattner 	if (!policy)
2306fefa8ff8SAaron Plattner 		return -ENODEV;
23071da177e4SLinus Torvalds 
2308ad7722daSviresh kumar 	down_write(&policy->rwsem);
23091da177e4SLinus Torvalds 
23102d06d8c4SDominik Brodowski 	pr_debug("updating policy for CPU %u\n", cpu);
2311d5b73cd8SViresh Kumar 	memcpy(&new_policy, policy, sizeof(*policy));
23123a3e9e06SViresh Kumar 	new_policy.min = policy->user_policy.min;
23133a3e9e06SViresh Kumar 	new_policy.max = policy->user_policy.max;
23143a3e9e06SViresh Kumar 	new_policy.policy = policy->user_policy.policy;
23153a3e9e06SViresh Kumar 	new_policy.governor = policy->user_policy.governor;
23161da177e4SLinus Torvalds 
2317bb176f7dSViresh Kumar 	/*
2318bb176f7dSViresh Kumar 	 * BIOS might change freq behind our back
2319bb176f7dSViresh Kumar 	 * -> ask driver for current freq and notify governors about a change
2320bb176f7dSViresh Kumar 	 */
23212ed99e39SRafael J. Wysocki 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
23223a3e9e06SViresh Kumar 		new_policy.cur = cpufreq_driver->get(cpu);
2323bd0fa9bbSViresh Kumar 		if (WARN_ON(!new_policy.cur)) {
2324bd0fa9bbSViresh Kumar 			ret = -EIO;
2325fefa8ff8SAaron Plattner 			goto unlock;
2326bd0fa9bbSViresh Kumar 		}
2327bd0fa9bbSViresh Kumar 
23283a3e9e06SViresh Kumar 		if (!policy->cur) {
2329e837f9b5SJoe Perches 			pr_debug("Driver did not initialize current freq\n");
23303a3e9e06SViresh Kumar 			policy->cur = new_policy.cur;
2331a85f7bd3SThomas Renninger 		} else {
23329c0ebcf7SViresh Kumar 			if (policy->cur != new_policy.cur && has_target())
2333a1e1dc41SViresh Kumar 				cpufreq_out_of_sync(policy, new_policy.cur);
23340961dd0dSThomas Renninger 		}
2335a85f7bd3SThomas Renninger 	}
23360961dd0dSThomas Renninger 
2337037ce839SViresh Kumar 	ret = cpufreq_set_policy(policy, &new_policy);
23381da177e4SLinus Torvalds 
2339fefa8ff8SAaron Plattner unlock:
2340ad7722daSviresh kumar 	up_write(&policy->rwsem);
23415a01f2e8SVenkatesh Pallipadi 
23423a3e9e06SViresh Kumar 	cpufreq_cpu_put(policy);
23431da177e4SLinus Torvalds 	return ret;
23441da177e4SLinus Torvalds }
23451da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy);
23461da177e4SLinus Torvalds 
23472760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb,
2348c32b6b8eSAshok Raj 					unsigned long action, void *hcpu)
2349c32b6b8eSAshok Raj {
2350c32b6b8eSAshok Raj 	unsigned int cpu = (unsigned long)hcpu;
2351c32b6b8eSAshok Raj 
23525302c3fbSSrivatsa S. Bhat 	switch (action & ~CPU_TASKS_FROZEN) {
2353c32b6b8eSAshok Raj 	case CPU_ONLINE:
2354*0b275352SRafael J. Wysocki 		cpufreq_online(cpu);
2355c32b6b8eSAshok Raj 		break;
23565302c3fbSSrivatsa S. Bhat 
2357c32b6b8eSAshok Raj 	case CPU_DOWN_PREPARE:
235815c0b4d2SRafael J. Wysocki 		cpufreq_offline_prepare(cpu);
23591aee40acSSrivatsa S. Bhat 		break;
23601aee40acSSrivatsa S. Bhat 
23611aee40acSSrivatsa S. Bhat 	case CPU_POST_DEAD:
236215c0b4d2SRafael J. Wysocki 		cpufreq_offline_finish(cpu);
2363c32b6b8eSAshok Raj 		break;
23645302c3fbSSrivatsa S. Bhat 
23655a01f2e8SVenkatesh Pallipadi 	case CPU_DOWN_FAILED:
2366*0b275352SRafael J. Wysocki 		cpufreq_online(cpu);
2367c32b6b8eSAshok Raj 		break;
2368c32b6b8eSAshok Raj 	}
2369c32b6b8eSAshok Raj 	return NOTIFY_OK;
2370c32b6b8eSAshok Raj }
2371c32b6b8eSAshok Raj 
23729c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = {
2373c32b6b8eSAshok Raj 	.notifier_call = cpufreq_cpu_callback,
2374c32b6b8eSAshok Raj };
23751da177e4SLinus Torvalds 
23761da177e4SLinus Torvalds /*********************************************************************
23776f19efc0SLukasz Majewski  *               BOOST						     *
23786f19efc0SLukasz Majewski  *********************************************************************/
23796f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state)
23806f19efc0SLukasz Majewski {
23816f19efc0SLukasz Majewski 	struct cpufreq_frequency_table *freq_table;
23826f19efc0SLukasz Majewski 	struct cpufreq_policy *policy;
23836f19efc0SLukasz Majewski 	int ret = -EINVAL;
23846f19efc0SLukasz Majewski 
2385f963735aSViresh Kumar 	for_each_active_policy(policy) {
23866f19efc0SLukasz Majewski 		freq_table = cpufreq_frequency_get_table(policy->cpu);
23876f19efc0SLukasz Majewski 		if (freq_table) {
23886f19efc0SLukasz Majewski 			ret = cpufreq_frequency_table_cpuinfo(policy,
23896f19efc0SLukasz Majewski 							freq_table);
23906f19efc0SLukasz Majewski 			if (ret) {
23916f19efc0SLukasz Majewski 				pr_err("%s: Policy frequency update failed\n",
23926f19efc0SLukasz Majewski 				       __func__);
23936f19efc0SLukasz Majewski 				break;
23946f19efc0SLukasz Majewski 			}
23956f19efc0SLukasz Majewski 			policy->user_policy.max = policy->max;
23966f19efc0SLukasz Majewski 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
23976f19efc0SLukasz Majewski 		}
23986f19efc0SLukasz Majewski 	}
23996f19efc0SLukasz Majewski 
24006f19efc0SLukasz Majewski 	return ret;
24016f19efc0SLukasz Majewski }
24026f19efc0SLukasz Majewski 
24036f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state)
24046f19efc0SLukasz Majewski {
24056f19efc0SLukasz Majewski 	unsigned long flags;
24066f19efc0SLukasz Majewski 	int ret = 0;
24076f19efc0SLukasz Majewski 
24086f19efc0SLukasz Majewski 	if (cpufreq_driver->boost_enabled == state)
24096f19efc0SLukasz Majewski 		return 0;
24106f19efc0SLukasz Majewski 
24116f19efc0SLukasz Majewski 	write_lock_irqsave(&cpufreq_driver_lock, flags);
24126f19efc0SLukasz Majewski 	cpufreq_driver->boost_enabled = state;
24136f19efc0SLukasz Majewski 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24146f19efc0SLukasz Majewski 
24156f19efc0SLukasz Majewski 	ret = cpufreq_driver->set_boost(state);
24166f19efc0SLukasz Majewski 	if (ret) {
24176f19efc0SLukasz Majewski 		write_lock_irqsave(&cpufreq_driver_lock, flags);
24186f19efc0SLukasz Majewski 		cpufreq_driver->boost_enabled = !state;
24196f19efc0SLukasz Majewski 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24206f19efc0SLukasz Majewski 
2421e837f9b5SJoe Perches 		pr_err("%s: Cannot %s BOOST\n",
2422e837f9b5SJoe Perches 		       __func__, state ? "enable" : "disable");
24236f19efc0SLukasz Majewski 	}
24246f19efc0SLukasz Majewski 
24256f19efc0SLukasz Majewski 	return ret;
24266f19efc0SLukasz Majewski }
24276f19efc0SLukasz Majewski 
24286f19efc0SLukasz Majewski int cpufreq_boost_supported(void)
24296f19efc0SLukasz Majewski {
24306f19efc0SLukasz Majewski 	if (likely(cpufreq_driver))
24316f19efc0SLukasz Majewski 		return cpufreq_driver->boost_supported;
24326f19efc0SLukasz Majewski 
24336f19efc0SLukasz Majewski 	return 0;
24346f19efc0SLukasz Majewski }
24356f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
24366f19efc0SLukasz Majewski 
24376f19efc0SLukasz Majewski int cpufreq_boost_enabled(void)
24386f19efc0SLukasz Majewski {
24396f19efc0SLukasz Majewski 	return cpufreq_driver->boost_enabled;
24406f19efc0SLukasz Majewski }
24416f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
24426f19efc0SLukasz Majewski 
24436f19efc0SLukasz Majewski /*********************************************************************
24441da177e4SLinus Torvalds  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
24451da177e4SLinus Torvalds  *********************************************************************/
24461da177e4SLinus Torvalds 
24471da177e4SLinus Torvalds /**
24481da177e4SLinus Torvalds  * cpufreq_register_driver - register a CPU Frequency driver
24491da177e4SLinus Torvalds  * @driver_data: A struct cpufreq_driver containing the values#
24501da177e4SLinus Torvalds  * submitted by the CPU Frequency driver.
24511da177e4SLinus Torvalds  *
24521da177e4SLinus Torvalds  * Registers a CPU Frequency driver to this core code. This code
24531da177e4SLinus Torvalds  * returns zero on success, -EBUSY when another driver got here first
24541da177e4SLinus Torvalds  * (and isn't unregistered in the meantime).
24551da177e4SLinus Torvalds  *
24561da177e4SLinus Torvalds  */
2457221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data)
24581da177e4SLinus Torvalds {
24591da177e4SLinus Torvalds 	unsigned long flags;
24601da177e4SLinus Torvalds 	int ret;
24611da177e4SLinus Torvalds 
2462a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2463a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2464a7b422cdSKonrad Rzeszutek Wilk 
24651da177e4SLinus Torvalds 	if (!driver_data || !driver_data->verify || !driver_data->init ||
24669c0ebcf7SViresh Kumar 	    !(driver_data->setpolicy || driver_data->target_index ||
24679832235fSRafael J. Wysocki 		    driver_data->target) ||
24689832235fSRafael J. Wysocki 	     (driver_data->setpolicy && (driver_data->target_index ||
24691c03a2d0SViresh Kumar 		    driver_data->target)) ||
24701c03a2d0SViresh Kumar 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
24711da177e4SLinus Torvalds 		return -EINVAL;
24721da177e4SLinus Torvalds 
24732d06d8c4SDominik Brodowski 	pr_debug("trying to register driver %s\n", driver_data->name);
24741da177e4SLinus Torvalds 
24750d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
24761c3d85ddSRafael J. Wysocki 	if (cpufreq_driver) {
24770d1857a1SNathan Zimmer 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24784dea5806SYinghai Lu 		return -EEXIST;
24791da177e4SLinus Torvalds 	}
24801c3d85ddSRafael J. Wysocki 	cpufreq_driver = driver_data;
24810d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
24821da177e4SLinus Torvalds 
2483bc68b7dfSViresh Kumar 	if (driver_data->setpolicy)
2484bc68b7dfSViresh Kumar 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2485bc68b7dfSViresh Kumar 
24866f19efc0SLukasz Majewski 	if (cpufreq_boost_supported()) {
24876f19efc0SLukasz Majewski 		/*
24886f19efc0SLukasz Majewski 		 * Check if driver provides function to enable boost -
24896f19efc0SLukasz Majewski 		 * if not, use cpufreq_boost_set_sw as default
24906f19efc0SLukasz Majewski 		 */
24916f19efc0SLukasz Majewski 		if (!cpufreq_driver->set_boost)
24926f19efc0SLukasz Majewski 			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
24936f19efc0SLukasz Majewski 
24946f19efc0SLukasz Majewski 		ret = cpufreq_sysfs_create_file(&boost.attr);
24956f19efc0SLukasz Majewski 		if (ret) {
24966f19efc0SLukasz Majewski 			pr_err("%s: cannot register global BOOST sysfs file\n",
24976f19efc0SLukasz Majewski 			       __func__);
24986f19efc0SLukasz Majewski 			goto err_null_driver;
24996f19efc0SLukasz Majewski 		}
25006f19efc0SLukasz Majewski 	}
25016f19efc0SLukasz Majewski 
25028a25a2fdSKay Sievers 	ret = subsys_interface_register(&cpufreq_interface);
25038f5bc2abSJiri Slaby 	if (ret)
25046f19efc0SLukasz Majewski 		goto err_boost_unreg;
25051da177e4SLinus Torvalds 
2506ce1bcfe9SViresh Kumar 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2507ce1bcfe9SViresh Kumar 	    list_empty(&cpufreq_policy_list)) {
25081da177e4SLinus Torvalds 		/* if all ->init() calls failed, unregister */
2509ce1bcfe9SViresh Kumar 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2510e08f5f5bSGautham R Shenoy 			 driver_data->name);
25118a25a2fdSKay Sievers 		goto err_if_unreg;
25121da177e4SLinus Torvalds 	}
25131da177e4SLinus Torvalds 
251465edc68cSChandra Seetharaman 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
25152d06d8c4SDominik Brodowski 	pr_debug("driver %s up and running\n", driver_data->name);
25161da177e4SLinus Torvalds 
25178f5bc2abSJiri Slaby 	return 0;
25188a25a2fdSKay Sievers err_if_unreg:
25198a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25206f19efc0SLukasz Majewski err_boost_unreg:
25216f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25226f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25238f5bc2abSJiri Slaby err_null_driver:
25240d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25251c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25260d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
25274d34a67dSDave Jones 	return ret;
25281da177e4SLinus Torvalds }
25291da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver);
25301da177e4SLinus Torvalds 
25311da177e4SLinus Torvalds /**
25321da177e4SLinus Torvalds  * cpufreq_unregister_driver - unregister the current CPUFreq driver
25331da177e4SLinus Torvalds  *
25341da177e4SLinus Torvalds  * Unregister the current CPUFreq driver. Only call this if you have
25351da177e4SLinus Torvalds  * the right to do so, i.e. if you have succeeded in initialising before!
25361da177e4SLinus Torvalds  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
25371da177e4SLinus Torvalds  * currently not initialised.
25381da177e4SLinus Torvalds  */
2539221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver)
25401da177e4SLinus Torvalds {
25411da177e4SLinus Torvalds 	unsigned long flags;
25421da177e4SLinus Torvalds 
25431c3d85ddSRafael J. Wysocki 	if (!cpufreq_driver || (driver != cpufreq_driver))
25441da177e4SLinus Torvalds 		return -EINVAL;
25451da177e4SLinus Torvalds 
25462d06d8c4SDominik Brodowski 	pr_debug("unregistering driver %s\n", driver->name);
25471da177e4SLinus Torvalds 
2548454d3a25SSebastian Andrzej Siewior 	/* Protect against concurrent cpu hotplug */
2549454d3a25SSebastian Andrzej Siewior 	get_online_cpus();
25508a25a2fdSKay Sievers 	subsys_interface_unregister(&cpufreq_interface);
25516f19efc0SLukasz Majewski 	if (cpufreq_boost_supported())
25526f19efc0SLukasz Majewski 		cpufreq_sysfs_remove_file(&boost.attr);
25536f19efc0SLukasz Majewski 
255465edc68cSChandra Seetharaman 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
25551da177e4SLinus Torvalds 
25560d1857a1SNathan Zimmer 	write_lock_irqsave(&cpufreq_driver_lock, flags);
25576eed9404SViresh Kumar 
25581c3d85ddSRafael J. Wysocki 	cpufreq_driver = NULL;
25596eed9404SViresh Kumar 
25600d1857a1SNathan Zimmer 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2561454d3a25SSebastian Andrzej Siewior 	put_online_cpus();
25621da177e4SLinus Torvalds 
25631da177e4SLinus Torvalds 	return 0;
25641da177e4SLinus Torvalds }
25651da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
25665a01f2e8SVenkatesh Pallipadi 
256790de2a4aSDoug Anderson /*
256890de2a4aSDoug Anderson  * Stop cpufreq at shutdown to make sure it isn't holding any locks
256990de2a4aSDoug Anderson  * or mutexes when secondary CPUs are halted.
257090de2a4aSDoug Anderson  */
257190de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = {
257290de2a4aSDoug Anderson 	.shutdown = cpufreq_suspend,
257390de2a4aSDoug Anderson };
257490de2a4aSDoug Anderson 
25755a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void)
25765a01f2e8SVenkatesh Pallipadi {
2577a7b422cdSKonrad Rzeszutek Wilk 	if (cpufreq_disabled())
2578a7b422cdSKonrad Rzeszutek Wilk 		return -ENODEV;
2579a7b422cdSKonrad Rzeszutek Wilk 
25802361be23SViresh Kumar 	cpufreq_global_kobject = kobject_create();
25818aa84ad8SThomas Renninger 	BUG_ON(!cpufreq_global_kobject);
25828aa84ad8SThomas Renninger 
258390de2a4aSDoug Anderson 	register_syscore_ops(&cpufreq_syscore_ops);
258490de2a4aSDoug Anderson 
25855a01f2e8SVenkatesh Pallipadi 	return 0;
25865a01f2e8SVenkatesh Pallipadi }
25875a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init);
2588