/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}

/* Finds next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)		\
	for (__policy = first_policy(__active);			\
	     __policy;						\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)			\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)			\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show freq table passed
 * - set policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding cpufreq_cpu_put() call isn't made, the policy wouldn't
 * be freed as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

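/*
 * Illustrative sketch, not part of the original file: the pairing the two
 * helpers above expect from whoever performs the actual frequency switch.
 * The cpufreq core does this on behalf of ordinary ->target_index() drivers,
 * while CPUFREQ_ASYNC_NOTIFICATION drivers (and other direct callers) issue
 * the calls themselves.  example_write_hw() is a hypothetical helper.
 *
 *	static int example_switch_freq(struct cpufreq_policy *policy,
 *				       unsigned int target_khz)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = target_khz,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = example_write_hw(target_khz);
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */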

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

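/*
 * Note added for illustration, not from the original file: the show_*() and
 * store_*() helpers in this block back the per-policy sysfs attributes
 * declared further down via the cpufreq_freq_attr_ro()/cpufreq_freq_attr_rw()
 * macros.  From userspace they surface under the per-CPU policy directory,
 * e.g. /sys/devices/system/cpu/cpuN/cpufreq/scaling_governor, readable and
 * (for the _rw attributes) writable with plain cat/echo.
 */
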
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
		if (j == policy->kobj_cpu)
			continue;

		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
		if (j == policy->kobj_cpu)
			continue;

		remove_cpu_dev_symlink(policy, j);
	}
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));
	}

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(int cpu)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;

	/* Set this once on allocation */
	policy->kobj_cpu = cpu;

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
Bhat
1216*87549141SViresh Kumar static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
12170d66b91eSSrivatsa S. Bhat {
121899ec899eSSrivatsa S. Bhat if (WARN_ON(cpu == policy->cpu))
1219*87549141SViresh Kumar return;
1220cb38ed5cSSrivatsa S. Bhat
1221ad7722daSviresh kumar down_write(&policy->rwsem);
12220d66b91eSSrivatsa S. Bhat policy->cpu = cpu;
1223ad7722daSviresh kumar up_write(&policy->rwsem);
12240d66b91eSSrivatsa S. Bhat }
12250d66b91eSSrivatsa S. Bhat
122623faf0b7SViresh Kumar /**
122723faf0b7SViresh Kumar * cpufreq_add_dev - add a CPU device
122823faf0b7SViresh Kumar *
122923faf0b7SViresh Kumar * Adds the cpufreq interface for a CPU device.
123023faf0b7SViresh Kumar *
123123faf0b7SViresh Kumar * The Oracle says: try running cpufreq registration/unregistration concurrently
123223faf0b7SViresh Kumar * with cpu hotplugging and all hell will break loose. Tried to clean this
123323faf0b7SViresh Kumar * mess up, but more thorough testing is needed. - Mathieu
123423faf0b7SViresh Kumar */
123523faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
12361da177e4SLinus Torvalds {
1237fcf80582SViresh Kumar unsigned int j, cpu = dev->id;
123865922465SViresh Kumar int ret = -ENOMEM;
12397f0c020aSViresh Kumar struct cpufreq_policy *policy;
12401da177e4SLinus Torvalds unsigned long flags;
1241*87549141SViresh Kumar bool recover_policy = !sif;
1242c32b6b8eSAshok Raj
12432d06d8c4SDominik Brodowski pr_debug("adding CPU %u\n", cpu);
12441da177e4SLinus Torvalds
1245*87549141SViresh Kumar /*
1246*87549141SViresh Kumar * Only possible if 'cpu' wasn't physically present earlier and we are
1247*87549141SViresh Kumar * here from subsys_interface add callback. A hotplug notifier will
1248*87549141SViresh Kumar * follow and we will handle it like logical CPU hotplug then. For now,
1249*87549141SViresh Kumar * just create the sysfs link.
1250*87549141SViresh Kumar */
1251*87549141SViresh Kumar if (cpu_is_offline(cpu))
1252*87549141SViresh Kumar return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
12536eed9404SViresh Kumar
12546eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem))
12556eed9404SViresh Kumar return 0;
12566eed9404SViresh Kumar
1257bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */
12589104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu);
12599104bb26SViresh Kumar if (policy && !policy_is_inactive(policy)) {
12609104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
12617f0c020aSViresh Kumar ret = cpufreq_add_policy_cpu(policy, cpu, dev);
12626eed9404SViresh Kumar up_read(&cpufreq_rwsem);
12636eed9404SViresh Kumar return ret;
1264fcf80582SViresh Kumar }
12651da177e4SLinus Torvalds
126672368d12SRafael J. Wysocki /*
126772368d12SRafael J. Wysocki * Restore the saved policy when doing light-weight init and fall back
126872368d12SRafael J. Wysocki * to the full init if that fails.
126972368d12SRafael J. Wysocki */
127096bbbe4aSViresh Kumar policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
127172368d12SRafael J. Wysocki if (!policy) {
127296bbbe4aSViresh Kumar recover_policy = false;
1273*87549141SViresh Kumar policy = cpufreq_policy_alloc(cpu);
1274059019a3SDave Jones if (!policy)
12751da177e4SLinus Torvalds goto nomem_out;
127672368d12SRafael J. Wysocki }
12770d66b91eSSrivatsa S. Bhat
12780d66b91eSSrivatsa S. Bhat /*
12790d66b91eSSrivatsa S. Bhat * In the resume path, since we restore a saved policy, the assignment
12800d66b91eSSrivatsa S. Bhat * to policy->cpu is like an update of the existing policy, rather than
12810d66b91eSSrivatsa S. Bhat * the creation of a brand new one. So we need to perform this update
12820d66b91eSSrivatsa S. Bhat * by invoking update_policy_cpu().
12830d66b91eSSrivatsa S. Bhat */
1284*87549141SViresh Kumar if (recover_policy && cpu != policy->cpu)
1285*87549141SViresh Kumar update_policy_cpu(policy, cpu);
12860d66b91eSSrivatsa S. Bhat
1287835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu));
12881da177e4SLinus Torvalds
12891da177e4SLinus Torvalds /* Call the driver. From then on the cpufreq driver must be able
12901da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU.
12911da177e4SLinus Torvalds */
12921c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy);
12931da177e4SLinus Torvalds if (ret) {
12942d06d8c4SDominik Brodowski pr_debug("initialization failed\n");
12952eaa3e2dSViresh Kumar goto err_set_policy_cpu;
12961da177e4SLinus Torvalds }
1297643ae6e8SViresh Kumar
12986d4e81edSTomeu Vizoso down_write(&policy->rwsem);
12996d4e81edSTomeu Vizoso
13005a7e56a5SViresh Kumar /* related cpus should at least have policy->cpus */
13015a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
13025a7e56a5SViresh Kumar
13035a7e56a5SViresh Kumar /*
13045a7e56a5SViresh Kumar * affected cpus must always be the ones which are online. We aren't
13055a7e56a5SViresh Kumar * managing offline cpus here.
13065a7e56a5SViresh Kumar */
13075a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
13085a7e56a5SViresh Kumar
130996bbbe4aSViresh Kumar if (!recover_policy) {
13105a7e56a5SViresh Kumar policy->user_policy.min = policy->min;
13115a7e56a5SViresh Kumar policy->user_policy.max = policy->max;
13126d4e81edSTomeu Vizoso
13136d4e81edSTomeu Vizoso /* prepare interface data */
13146d4e81edSTomeu Vizoso ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
13156d4e81edSTomeu Vizoso &dev->kobj, "cpufreq");
13166d4e81edSTomeu Vizoso if (ret) {
13176d4e81edSTomeu Vizoso pr_err("%s: failed to init policy->kobj: %d\n",
13186d4e81edSTomeu Vizoso __func__, ret);
13196d4e81edSTomeu Vizoso goto err_init_policy_kobj;
13206d4e81edSTomeu Vizoso }
13215a7e56a5SViresh Kumar
1322652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags);
1323988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus)
1324652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy;
1325652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1326988bed09SViresh Kumar }
1327652ed95dSViresh Kumar
13282ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1329da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu);
1330da60ce9fSViresh Kumar if (!policy->cur) {
1331da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__);
1332da60ce9fSViresh Kumar goto err_get_freq;
1333da60ce9fSViresh Kumar }
1334da60ce9fSViresh Kumar }
1335da60ce9fSViresh Kumar
1336d3916691SViresh Kumar /*
1337d3916691SViresh Kumar * Sometimes boot loaders set CPU frequency to a value outside of
1338d3916691SViresh Kumar * the frequency table known to the cpufreq core. In such cases the CPU
1339d3916691SViresh Kumar * might be unstable if it has to run at that frequency for a long time,
1340d3916691SViresh Kumar * and so it's better to set it to a frequency which is specified in
1341d3916691SViresh Kumar * the freq-table. This also makes cpufreq stats inconsistent, as
1342d3916691SViresh Kumar * cpufreq-stats would fail to register because the current frequency of
1343d3916691SViresh Kumar * the CPU isn't found in the freq-table.
1344d3916691SViresh Kumar *
1345d3916691SViresh Kumar * Because we don't want this change to affect the boot process badly, we go
1346d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now,
1347d3916691SViresh Kumar * otherwise we will end up setting freq to lowest of the table as 'cur'
1348d3916691SViresh Kumar * is initialized to zero).
1349d3916691SViresh Kumar *
1350d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1" otherwise
1351d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be
1352d3916691SViresh Kumar * equal to target-freq.
1353d3916691SViresh Kumar */
1354d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1355d3916691SViresh Kumar && has_target()) {
1356d3916691SViresh Kumar /* Are we running at unknown frequency ? */
1357d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1358d3916691SViresh Kumar if (ret == -EINVAL) {
1359d3916691SViresh Kumar /* Warn user and fix it */
1360d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1361d3916691SViresh Kumar __func__, policy->cpu, policy->cur);
1362d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1,
1363d3916691SViresh Kumar CPUFREQ_RELATION_L);
1364d3916691SViresh Kumar
1365d3916691SViresh Kumar /*
1366d3916691SViresh Kumar * Reaching here after boot in a few seconds may not
1367d3916691SViresh Kumar * mean that system will remain stable at "unknown"
1368d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON().
1369d3916691SViresh Kumar */
1370d3916691SViresh Kumar BUG_ON(ret);
1371d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1372d3916691SViresh Kumar __func__, policy->cpu, policy->cur);
1373d3916691SViresh Kumar }
1374d3916691SViresh Kumar }
1375d3916691SViresh Kumar
1376a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1377a1531acdSThomas Renninger CPUFREQ_START, policy);
1378a1531acdSThomas Renninger
137996bbbe4aSViresh Kumar if (!recover_policy) {
1380308b60e7SViresh Kumar ret = cpufreq_add_dev_interface(policy, dev);
138119d6f7ecSDave Jones if (ret)
13820142f9dcSAhmed S. Darwish goto err_out_unregister;
1383fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1384fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy);
1385c88a1f8bSLukasz Majewski
1386c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags);
1387c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list);
1388c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1389988bed09SViresh Kumar }
13908ff69732SDave Jones
1391e18f1682SSrivatsa S. Bhat cpufreq_init_policy(policy);
1392e18f1682SSrivatsa S.
Bhat 139396bbbe4aSViresh Kumar if (!recover_policy) { 139408fd8c1cSViresh Kumar policy->user_policy.policy = policy->policy; 139508fd8c1cSViresh Kumar policy->user_policy.governor = policy->governor; 139608fd8c1cSViresh Kumar } 13974e97b631SViresh Kumar up_write(&policy->rwsem); 139808fd8c1cSViresh Kumar 1399038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 14007c45cf31SViresh Kumar 14016eed9404SViresh Kumar up_read(&cpufreq_rwsem); 14026eed9404SViresh Kumar 14037c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 14047c45cf31SViresh Kumar if (cpufreq_driver->ready) 14057c45cf31SViresh Kumar cpufreq_driver->ready(policy); 14067c45cf31SViresh Kumar 14072d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 14081da177e4SLinus Torvalds 14091da177e4SLinus Torvalds return 0; 14101da177e4SLinus Torvalds 14111da177e4SLinus Torvalds err_out_unregister: 1412652ed95dSViresh Kumar err_get_freq: 14136d4e81edSTomeu Vizoso if (!recover_policy) { 14146d4e81edSTomeu Vizoso kobject_put(&policy->kobj); 14156d4e81edSTomeu Vizoso wait_for_completion(&policy->kobj_unregister); 14166d4e81edSTomeu Vizoso } 14176d4e81edSTomeu Vizoso err_init_policy_kobj: 14187106e02bSPrarit Bhargava up_write(&policy->rwsem); 14197106e02bSPrarit Bhargava 1420da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1421da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 14222eaa3e2dSViresh Kumar err_set_policy_cpu: 14233914d379SViresh Kumar if (recover_policy) 142442f921a6SViresh Kumar cpufreq_policy_put_kobj(policy); 1425e9698cc5SSrivatsa S. Bhat cpufreq_policy_free(policy); 142642f921a6SViresh Kumar 14271da177e4SLinus Torvalds nomem_out: 14286eed9404SViresh Kumar up_read(&cpufreq_rwsem); 14296eed9404SViresh Kumar 14301da177e4SLinus Torvalds return ret; 14311da177e4SLinus Torvalds } 14321da177e4SLinus Torvalds 1433cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_prepare(struct device *dev, 143496bbbe4aSViresh Kumar struct subsys_interface *sif) 14351da177e4SLinus Torvalds { 1436f9ba680dSSrivatsa S. 
Bhat unsigned int cpu = dev->id, cpus; 14371bfb425bSViresh Kumar int ret; 14383a3e9e06SViresh Kumar struct cpufreq_policy *policy; 14391da177e4SLinus Torvalds 1440b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 14411da177e4SLinus Torvalds 1442988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 14433a3e9e06SViresh Kumar if (!policy) { 1444b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 14451da177e4SLinus Torvalds return -EINVAL; 14461da177e4SLinus Torvalds } 14471da177e4SLinus Torvalds 14489c0ebcf7SViresh Kumar if (has_target()) { 14493de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 14503de9bdebSViresh Kumar if (ret) { 14513de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 14523de9bdebSViresh Kumar return ret; 14533de9bdebSViresh Kumar } 1454db5f2995SViresh Kumar } 14551da177e4SLinus Torvalds 14564573237bSViresh Kumar down_write(&policy->rwsem); 14573a3e9e06SViresh Kumar cpus = cpumask_weight(policy->cpus); 14584573237bSViresh Kumar 14594573237bSViresh Kumar if (has_target() && cpus == 1) 14604573237bSViresh Kumar strncpy(policy->last_governor, policy->governor->name, 14614573237bSViresh Kumar CPUFREQ_NAME_LEN); 14624573237bSViresh Kumar up_write(&policy->rwsem); 14631da177e4SLinus Torvalds 1464*87549141SViresh Kumar if (cpu != policy->cpu) 1465*87549141SViresh Kumar return 0; 1466*87549141SViresh Kumar 1467*87549141SViresh Kumar if (cpus > 1) 14681bfb425bSViresh Kumar /* Nominate new CPU */ 1469*87549141SViresh Kumar update_policy_cpu(policy, cpumask_any_but(policy->cpus, cpu)); 1470*87549141SViresh Kumar else if (cpufreq_driver->stop_cpu) 1471367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 1472b8eed8afSViresh Kumar 1473cedb70afSSrivatsa S. Bhat return 0; 1474cedb70afSSrivatsa S. Bhat } 1475cedb70afSSrivatsa S. Bhat 1476cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev, 147796bbbe4aSViresh Kumar struct subsys_interface *sif) 1478cedb70afSSrivatsa S. Bhat { 1479988bed09SViresh Kumar unsigned int cpu = dev->id; 1480cedb70afSSrivatsa S. Bhat int ret; 1481988bed09SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); 1482cedb70afSSrivatsa S. Bhat 1483cedb70afSSrivatsa S. Bhat if (!policy) { 1484cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 1485cedb70afSSrivatsa S. Bhat return -EINVAL; 1486cedb70afSSrivatsa S. Bhat } 1487cedb70afSSrivatsa S. Bhat 1488ad7722daSviresh kumar down_write(&policy->rwsem); 14899c8f1ee4SViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 1490ad7722daSviresh kumar up_write(&policy->rwsem); 1491cedb70afSSrivatsa S. Bhat 1492*87549141SViresh Kumar /* Not the last cpu of policy, start governor again ? 
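 * (i.e. other CPUs in policy->cpus are still online, so its governor
 *  must keep running for them)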
*/ 1493*87549141SViresh Kumar if (!policy_is_inactive(policy)) { 1494*87549141SViresh Kumar if (!has_target()) 1495*87549141SViresh Kumar return 0; 1496*87549141SViresh Kumar 1497*87549141SViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1498*87549141SViresh Kumar if (!ret) 1499*87549141SViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1500*87549141SViresh Kumar 15013de9bdebSViresh Kumar if (ret) { 1502*87549141SViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 1503*87549141SViresh Kumar return ret; 1504*87549141SViresh Kumar } 1505*87549141SViresh Kumar 1506*87549141SViresh Kumar return 0; 1507*87549141SViresh Kumar } 1508*87549141SViresh Kumar 1509*87549141SViresh Kumar /* If cpu is last user of policy, free policy */ 1510*87549141SViresh Kumar if (has_target()) { 1511*87549141SViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1512*87549141SViresh Kumar if (ret) { 1513*87549141SViresh Kumar pr_err("%s: Failed to exit governor\n", __func__); 15143de9bdebSViresh Kumar return ret; 15153de9bdebSViresh Kumar } 15163de9bdebSViresh Kumar } 15172a998599SRafael J. Wysocki 1518*87549141SViresh Kumar /* Free the policy kobjects only if the driver is getting removed. */ 1519*87549141SViresh Kumar if (sif) 152042f921a6SViresh Kumar cpufreq_policy_put_kobj(policy); 15211da177e4SLinus Torvalds 15228414809cSSrivatsa S. Bhat /* 15238414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 15248414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 15258414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 15268414809cSSrivatsa S. Bhat */ 15271c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 15283a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 152927ecddc2SJacob Shin 1530*87549141SViresh Kumar if (sif) 15313a3e9e06SViresh Kumar cpufreq_policy_free(policy); 15321da177e4SLinus Torvalds 15331da177e4SLinus Torvalds return 0; 15341da177e4SLinus Torvalds } 15351da177e4SLinus Torvalds 1536cedb70afSSrivatsa S. Bhat /** 153727a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1538cedb70afSSrivatsa S. Bhat * 1539cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1540cedb70afSSrivatsa S. Bhat */ 15418a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 15425a01f2e8SVenkatesh Pallipadi { 15438a25a2fdSKay Sievers unsigned int cpu = dev->id; 154427a862e9SViresh Kumar int ret; 1545ec28297aSVenki Pallipadi 1546*87549141SViresh Kumar /* 1547*87549141SViresh Kumar * Only possible if 'cpu' is getting physically removed now. A hotplug 1548*87549141SViresh Kumar * notifier should have already been called and we just need to remove 1549*87549141SViresh Kumar * link or free policy here. 1550*87549141SViresh Kumar */ 1551*87549141SViresh Kumar if (cpu_is_offline(cpu)) { 1552*87549141SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1553*87549141SViresh Kumar struct cpumask mask; 1554*87549141SViresh Kumar 1555*87549141SViresh Kumar if (!policy) 1556ec28297aSVenki Pallipadi return 0; 1557ec28297aSVenki Pallipadi 1558*87549141SViresh Kumar cpumask_copy(&mask, policy->related_cpus); 1559*87549141SViresh Kumar cpumask_clear_cpu(cpu, &mask); 1560*87549141SViresh Kumar 1561*87549141SViresh Kumar /* 1562*87549141SViresh Kumar * Free policy only if all policy->related_cpus are removed 1563*87549141SViresh Kumar * physically. 
1564*87549141SViresh Kumar */ 1565*87549141SViresh Kumar if (cpumask_intersects(&mask, cpu_present_mask)) { 1566*87549141SViresh Kumar remove_cpu_dev_symlink(policy, cpu); 1567*87549141SViresh Kumar return 0; 1568*87549141SViresh Kumar } 1569*87549141SViresh Kumar 1570*87549141SViresh Kumar cpufreq_policy_put_kobj(policy); 1571*87549141SViresh Kumar cpufreq_policy_free(policy); 1572*87549141SViresh Kumar return 0; 1573*87549141SViresh Kumar } 1574*87549141SViresh Kumar 157596bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_prepare(dev, sif); 157627a862e9SViresh Kumar 157727a862e9SViresh Kumar if (!ret) 157896bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_finish(dev, sif); 157927a862e9SViresh Kumar 158027a862e9SViresh Kumar return ret; 15815a01f2e8SVenkatesh Pallipadi } 15825a01f2e8SVenkatesh Pallipadi 158365f27f38SDavid Howells static void handle_update(struct work_struct *work) 15841da177e4SLinus Torvalds { 158565f27f38SDavid Howells struct cpufreq_policy *policy = 158665f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 158765f27f38SDavid Howells unsigned int cpu = policy->cpu; 15882d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 15891da177e4SLinus Torvalds cpufreq_update_policy(cpu); 15901da177e4SLinus Torvalds } 15911da177e4SLinus Torvalds 15921da177e4SLinus Torvalds /** 1593bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1594bb176f7dSViresh Kumar * in deep trouble. 1595a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15961da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15971da177e4SLinus Torvalds * 159829464f28SDave Jones * We adjust to current frequency first, and need to clean up later. 159929464f28SDave Jones * So either call to cpufreq_update_policy() or schedule handle_update()). 16001da177e4SLinus Torvalds */ 1601a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1602e08f5f5bSGautham R Shenoy unsigned int new_freq) 16031da177e4SLinus Torvalds { 16041da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1605b43a7ffbSViresh Kumar 1606e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1607a1e1dc41SViresh Kumar policy->cur, new_freq); 16081da177e4SLinus Torvalds 1609a1e1dc41SViresh Kumar freqs.old = policy->cur; 16101da177e4SLinus Torvalds freqs.new = new_freq; 1611b43a7ffbSViresh Kumar 16128fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 16138fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 16141da177e4SLinus Torvalds } 16151da177e4SLinus Torvalds 16161da177e4SLinus Torvalds /** 16174ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 161895235ca2SVenkatesh Pallipadi * @cpu: CPU number 161995235ca2SVenkatesh Pallipadi * 162095235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 162195235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 162295235ca2SVenkatesh Pallipadi */ 162395235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 162495235ca2SVenkatesh Pallipadi { 16259e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1626e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 162795235ca2SVenkatesh Pallipadi 16281c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 16291c3d85ddSRafael J. 
Wysocki return cpufreq_driver->get(cpu); 16309e21ba8bSDirk Brandewie 16319e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 163295235ca2SVenkatesh Pallipadi if (policy) { 1633e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 163495235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 163595235ca2SVenkatesh Pallipadi } 163695235ca2SVenkatesh Pallipadi 16374d34a67dSDave Jones return ret_freq; 163895235ca2SVenkatesh Pallipadi } 163995235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 164095235ca2SVenkatesh Pallipadi 16413d737108SJesse Barnes /** 16423d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 16433d737108SJesse Barnes * @cpu: CPU number 16443d737108SJesse Barnes * 16453d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 16463d737108SJesse Barnes */ 16473d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 16483d737108SJesse Barnes { 16493d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16503d737108SJesse Barnes unsigned int ret_freq = 0; 16513d737108SJesse Barnes 16523d737108SJesse Barnes if (policy) { 16533d737108SJesse Barnes ret_freq = policy->max; 16543d737108SJesse Barnes cpufreq_cpu_put(policy); 16553d737108SJesse Barnes } 16563d737108SJesse Barnes 16573d737108SJesse Barnes return ret_freq; 16583d737108SJesse Barnes } 16593d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 16603d737108SJesse Barnes 1661d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 16621da177e4SLinus Torvalds { 1663e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 16641da177e4SLinus Torvalds 16651c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 16664d34a67dSDave Jones return ret_freq; 16671da177e4SLinus Torvalds 1668d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 16691da177e4SLinus Torvalds 167011e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 167111e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) 167211e584cfSViresh Kumar return ret_freq; 167311e584cfSViresh Kumar 1674e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 16751c3d85ddSRafael J. 
Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1676e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1677e08f5f5bSGautham R Shenoy saved value exists */ 1678e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1679a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 16801da177e4SLinus Torvalds schedule_work(&policy->update); 16811da177e4SLinus Torvalds } 16821da177e4SLinus Torvalds } 16831da177e4SLinus Torvalds 16844d34a67dSDave Jones return ret_freq; 16855a01f2e8SVenkatesh Pallipadi } 16861da177e4SLinus Torvalds 16875a01f2e8SVenkatesh Pallipadi /** 16885a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 16895a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 16905a01f2e8SVenkatesh Pallipadi * 16915a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16925a01f2e8SVenkatesh Pallipadi */ 16935a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16945a01f2e8SVenkatesh Pallipadi { 1695999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16965a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16975a01f2e8SVenkatesh Pallipadi 1698999976e0SAaron Plattner if (policy) { 1699ad7722daSviresh kumar down_read(&policy->rwsem); 1700d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1701ad7722daSviresh kumar up_read(&policy->rwsem); 1702999976e0SAaron Plattner 1703999976e0SAaron Plattner cpufreq_cpu_put(policy); 1704999976e0SAaron Plattner } 17056eed9404SViresh Kumar 17064d34a67dSDave Jones return ret_freq; 17071da177e4SLinus Torvalds } 17081da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 17091da177e4SLinus Torvalds 17108a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 17118a25a2fdSKay Sievers .name = "cpufreq", 17128a25a2fdSKay Sievers .subsys = &cpu_subsys, 17138a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 17148a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1715e00e56dfSRafael J. Wysocki }; 1716e00e56dfSRafael J. Wysocki 1717e28867eaSViresh Kumar /* 1718e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1719e28867eaSViresh Kumar * during suspend.. 172042d4dc3fSBenjamin Herrenschmidt */ 1721e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 172242d4dc3fSBenjamin Herrenschmidt { 1723e28867eaSViresh Kumar int ret; 17244bc5d341SDave Jones 1725e28867eaSViresh Kumar if (!policy->suspend_freq) { 1726e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1727e28867eaSViresh Kumar return -EINVAL; 172842d4dc3fSBenjamin Herrenschmidt } 172942d4dc3fSBenjamin Herrenschmidt 1730e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1731e28867eaSViresh Kumar policy->suspend_freq); 1732e28867eaSViresh Kumar 1733e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1734e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1735e28867eaSViresh Kumar if (ret) 1736e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. 
err: %d\n", 1737e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1738e28867eaSViresh Kumar 1739c9060494SDave Jones return ret; 174042d4dc3fSBenjamin Herrenschmidt } 1741e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 174242d4dc3fSBenjamin Herrenschmidt 174342d4dc3fSBenjamin Herrenschmidt /** 17442f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 17451da177e4SLinus Torvalds * 17462f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 17472f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 17482f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 17492f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 17501da177e4SLinus Torvalds */ 17512f0aea93SViresh Kumar void cpufreq_suspend(void) 17521da177e4SLinus Torvalds { 17533a3e9e06SViresh Kumar struct cpufreq_policy *policy; 17541da177e4SLinus Torvalds 17552f0aea93SViresh Kumar if (!cpufreq_driver) 1756e00e56dfSRafael J. Wysocki return; 17571da177e4SLinus Torvalds 17582f0aea93SViresh Kumar if (!has_target()) 1759b1b12babSViresh Kumar goto suspend; 17601da177e4SLinus Torvalds 17612f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 17622f0aea93SViresh Kumar 1763f963735aSViresh Kumar for_each_active_policy(policy) { 17642f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 17652f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 17662f0aea93SViresh Kumar __func__, policy); 17672f0aea93SViresh Kumar else if (cpufreq_driver->suspend 17682f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 17692f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 17702f0aea93SViresh Kumar policy); 17711da177e4SLinus Torvalds } 1772b1b12babSViresh Kumar 1773b1b12babSViresh Kumar suspend: 1774b1b12babSViresh Kumar cpufreq_suspended = true; 17751da177e4SLinus Torvalds } 17761da177e4SLinus Torvalds 17771da177e4SLinus Torvalds /** 17782f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 17791da177e4SLinus Torvalds * 17802f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 17812f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 
17821da177e4SLinus Torvalds */ 17832f0aea93SViresh Kumar void cpufreq_resume(void) 17841da177e4SLinus Torvalds { 17851da177e4SLinus Torvalds struct cpufreq_policy *policy; 17861da177e4SLinus Torvalds 17872f0aea93SViresh Kumar if (!cpufreq_driver) 17881da177e4SLinus Torvalds return; 17891da177e4SLinus Torvalds 17908e30444eSLan Tianyu cpufreq_suspended = false; 17918e30444eSLan Tianyu 17922f0aea93SViresh Kumar if (!has_target()) 17932f0aea93SViresh Kumar return; 17941da177e4SLinus Torvalds 17952f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17962f0aea93SViresh Kumar 1797f963735aSViresh Kumar for_each_active_policy(policy) { 17980c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17990c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 18000c5aa405SViresh Kumar policy); 18010c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 18022f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 18032f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 18042f0aea93SViresh Kumar __func__, policy); 1805c75de0acSViresh Kumar } 18062f0aea93SViresh Kumar 18072f0aea93SViresh Kumar /* 1808c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1809c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1810c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 18112f0aea93SViresh Kumar */ 1812c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1813c75de0acSViresh Kumar if (WARN_ON(!policy)) 1814c75de0acSViresh Kumar return; 1815c75de0acSViresh Kumar 18163a3e9e06SViresh Kumar schedule_work(&policy->update); 18171da177e4SLinus Torvalds } 18181da177e4SLinus Torvalds 18199d95046eSBorislav Petkov /** 18209d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 18219d95046eSBorislav Petkov * 18229d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 18239d95046eSBorislav Petkov * or NULL, if none. 18249d95046eSBorislav Petkov */ 18259d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 18269d95046eSBorislav Petkov { 18271c3d85ddSRafael J. Wysocki if (cpufreq_driver) 18281c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 18291c3d85ddSRafael J. Wysocki 18301c3d85ddSRafael J. Wysocki return NULL; 18319d95046eSBorislav Petkov } 18329d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 18331da177e4SLinus Torvalds 183451315cdfSThomas Petazzoni /** 183551315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 183651315cdfSThomas Petazzoni * 183751315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 183851315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 
183951315cdfSThomas Petazzoni */ 184051315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 184151315cdfSThomas Petazzoni { 184251315cdfSThomas Petazzoni if (cpufreq_driver) 184351315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 184451315cdfSThomas Petazzoni 184551315cdfSThomas Petazzoni return NULL; 184651315cdfSThomas Petazzoni } 184751315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 184851315cdfSThomas Petazzoni 18491da177e4SLinus Torvalds /********************************************************************* 18501da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 18511da177e4SLinus Torvalds *********************************************************************/ 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds /** 18541da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 18551da177e4SLinus Torvalds * @nb: notifier function to register 18561da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18571da177e4SLinus Torvalds * 18581da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 18591da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 18601da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 18611da177e4SLinus Torvalds * changes in cpufreq policy. 18621da177e4SLinus Torvalds * 18631da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1864e041c683SAlan Stern * blocking_notifier_chain_register. 18651da177e4SLinus Torvalds */ 18661da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 18671da177e4SLinus Torvalds { 18681da177e4SLinus Torvalds int ret; 18691da177e4SLinus Torvalds 1870d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1871d5aaffa9SDirk Brandewie return -EINVAL; 1872d5aaffa9SDirk Brandewie 187374212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 187474212ca4SCesar Eduardo Barros 18751da177e4SLinus Torvalds switch (list) { 18761da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1877b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1878e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18791da177e4SLinus Torvalds break; 18801da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1881e041c683SAlan Stern ret = blocking_notifier_chain_register( 1882e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18831da177e4SLinus Torvalds break; 18841da177e4SLinus Torvalds default: 18851da177e4SLinus Torvalds ret = -EINVAL; 18861da177e4SLinus Torvalds } 18871da177e4SLinus Torvalds 18881da177e4SLinus Torvalds return ret; 18891da177e4SLinus Torvalds } 18901da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18911da177e4SLinus Torvalds 18921da177e4SLinus Torvalds /** 18931da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18941da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18951da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18961da177e4SLinus Torvalds * 18971da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18981da177e4SLinus Torvalds * 18991da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1900e041c683SAlan Stern * blocking_notifier_chain_unregister. 
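 *
 * A minimal transition-notifier usage sketch (illustrative only; the callback
 * and notifier_block names below are made up for this example):
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	...
 *	cpufreq_unregister_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);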
19011da177e4SLinus Torvalds */ 19021da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 19031da177e4SLinus Torvalds { 19041da177e4SLinus Torvalds int ret; 19051da177e4SLinus Torvalds 1906d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1907d5aaffa9SDirk Brandewie return -EINVAL; 1908d5aaffa9SDirk Brandewie 19091da177e4SLinus Torvalds switch (list) { 19101da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1911b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1912e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 19131da177e4SLinus Torvalds break; 19141da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1915e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1916e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 19171da177e4SLinus Torvalds break; 19181da177e4SLinus Torvalds default: 19191da177e4SLinus Torvalds ret = -EINVAL; 19201da177e4SLinus Torvalds } 19211da177e4SLinus Torvalds 19221da177e4SLinus Torvalds return ret; 19231da177e4SLinus Torvalds } 19241da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 19251da177e4SLinus Torvalds 19261da177e4SLinus Torvalds 19271da177e4SLinus Torvalds /********************************************************************* 19281da177e4SLinus Torvalds * GOVERNORS * 19291da177e4SLinus Torvalds *********************************************************************/ 19301da177e4SLinus Torvalds 19311c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 19321c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 19331c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 19341c03a2d0SViresh Kumar { 19351c03a2d0SViresh Kumar int ret; 19361c03a2d0SViresh Kumar 19371c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 19381c03a2d0SViresh Kumar 19391c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 19401c03a2d0SViresh Kumar if (!freqs->new) 19411c03a2d0SViresh Kumar return 0; 19421c03a2d0SViresh Kumar 19431c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 19441c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 19451c03a2d0SViresh Kumar 19461c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 19471c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 19481c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 19491c03a2d0SViresh Kumar 19501c03a2d0SViresh Kumar if (ret) 19511c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 19521c03a2d0SViresh Kumar __func__, ret); 19531c03a2d0SViresh Kumar 19541c03a2d0SViresh Kumar return ret; 19551c03a2d0SViresh Kumar } 19561c03a2d0SViresh Kumar 19578d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 19588d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 19598d65775dSViresh Kumar { 19601c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 19611c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 19628d65775dSViresh Kumar int retval = -EINVAL; 19638d65775dSViresh Kumar bool notify; 19648d65775dSViresh Kumar 19658d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 19668d65775dSViresh Kumar if (notify) { 19671c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 19681c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 
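/*
 * Two-step switch: __target_intermediate() announces and programs the
 * intermediate frequency first; freqs.old is then updated so that the
 * final transition to freq_table[index] is reported from that
 * intermediate value.
 */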
19691c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 19701c03a2d0SViresh Kumar if (retval) 19711c03a2d0SViresh Kumar return retval; 19728d65775dSViresh Kumar 19731c03a2d0SViresh Kumar intermediate_freq = freqs.new; 19741c03a2d0SViresh Kumar /* Set old freq to intermediate */ 19751c03a2d0SViresh Kumar if (intermediate_freq) 19761c03a2d0SViresh Kumar freqs.old = freqs.new; 19771c03a2d0SViresh Kumar } 19781c03a2d0SViresh Kumar 19791c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 19808d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 19818d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 19828d65775dSViresh Kumar 19838d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19848d65775dSViresh Kumar } 19858d65775dSViresh Kumar 19868d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 19878d65775dSViresh Kumar if (retval) 19888d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 19898d65775dSViresh Kumar retval); 19908d65775dSViresh Kumar 19911c03a2d0SViresh Kumar if (notify) { 19928d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 19938d65775dSViresh Kumar 19941c03a2d0SViresh Kumar /* 19951c03a2d0SViresh Kumar * Failed after setting to intermediate freq? Driver should have 19961c03a2d0SViresh Kumar * reverted back to initial frequency and so should we. Check 19971c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 199858405af6SShailendra Verma * case we haven't switched to intermediate freq at all. 19991c03a2d0SViresh Kumar */ 20001c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 20011c03a2d0SViresh Kumar freqs.old = intermediate_freq; 20021c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 20031c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 20041c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 20051c03a2d0SViresh Kumar } 20061c03a2d0SViresh Kumar } 20071c03a2d0SViresh Kumar 20088d65775dSViresh Kumar return retval; 20098d65775dSViresh Kumar } 20108d65775dSViresh Kumar 20111da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 20121da177e4SLinus Torvalds unsigned int target_freq, 20131da177e4SLinus Torvalds unsigned int relation) 20141da177e4SLinus Torvalds { 20157249924eSViresh Kumar unsigned int old_target_freq = target_freq; 20168d65775dSViresh Kumar int retval = -EINVAL; 2017c32b6b8eSAshok Raj 2018a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2019a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2020a7b422cdSKonrad Rzeszutek Wilk 20217249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 20227249924eSViresh Kumar if (target_freq > policy->max) 20237249924eSViresh Kumar target_freq = policy->max; 20247249924eSViresh Kumar if (target_freq < policy->min) 20257249924eSViresh Kumar target_freq = policy->min; 20267249924eSViresh Kumar 20277249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 20287249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 20295a1c0228SViresh Kumar 20309c0ebcf7SViresh Kumar /* 20319c0ebcf7SViresh Kumar * This might look like a redundant call as we are checking it again 20329c0ebcf7SViresh Kumar * after finding index. But it is left intentionally for cases where 20339c0ebcf7SViresh Kumar * exactly same freq is called again and so we can save on few function 20349c0ebcf7SViresh Kumar * calls. 
20359c0ebcf7SViresh Kumar */ 20365a1c0228SViresh Kumar if (target_freq == policy->cur) 20375a1c0228SViresh Kumar return 0; 20385a1c0228SViresh Kumar 20391c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 20401c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 20411c03a2d0SViresh Kumar 20421c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 20431c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 20449c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 20459c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 20469c0ebcf7SViresh Kumar int index; 204790d45d17SAshok Raj 20489c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 20499c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 20509c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 20519c0ebcf7SViresh Kumar goto out; 20529c0ebcf7SViresh Kumar } 20539c0ebcf7SViresh Kumar 20549c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 20559c0ebcf7SViresh Kumar target_freq, relation, &index); 20569c0ebcf7SViresh Kumar if (unlikely(retval)) { 20579c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 20589c0ebcf7SViresh Kumar goto out; 20599c0ebcf7SViresh Kumar } 20609c0ebcf7SViresh Kumar 2061d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 20629c0ebcf7SViresh Kumar retval = 0; 2063d4019f0aSViresh Kumar goto out; 2064d4019f0aSViresh Kumar } 2065d4019f0aSViresh Kumar 20668d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 20679c0ebcf7SViresh Kumar } 20689c0ebcf7SViresh Kumar 20699c0ebcf7SViresh Kumar out: 20701da177e4SLinus Torvalds return retval; 20711da177e4SLinus Torvalds } 20721da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 20731da177e4SLinus Torvalds 20741da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 20751da177e4SLinus Torvalds unsigned int target_freq, 20761da177e4SLinus Torvalds unsigned int relation) 20771da177e4SLinus Torvalds { 2078f1829e4aSJulia Lawall int ret = -EINVAL; 20791da177e4SLinus Torvalds 2080ad7722daSviresh kumar down_write(&policy->rwsem); 20811da177e4SLinus Torvalds 20821da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 20831da177e4SLinus Torvalds 2084ad7722daSviresh kumar up_write(&policy->rwsem); 20851da177e4SLinus Torvalds 20861da177e4SLinus Torvalds return ret; 20871da177e4SLinus Torvalds } 20881da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 20891da177e4SLinus Torvalds 2090e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 2091e08f5f5bSGautham R Shenoy unsigned int event) 20921da177e4SLinus Torvalds { 2093cc993cabSDave Jones int ret; 20946afde10cSThomas Renninger 20956afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 20966afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 
20976afde10cSThomas Renninger That this is the case is already ensured in Kconfig 20986afde10cSThomas Renninger */ 20996afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 21006afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 21016afde10cSThomas Renninger #else 21026afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 21036afde10cSThomas Renninger #endif 21041c256245SThomas Renninger 21052f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 21062f0aea93SViresh Kumar if (cpufreq_suspended) 21072f0aea93SViresh Kumar return 0; 2108cb57720bSEthan Zhao /* 2109cb57720bSEthan Zhao * Governor might not be initiated here if ACPI _PPC changed 2110cb57720bSEthan Zhao * notification happened, so check it. 2111cb57720bSEthan Zhao */ 2112cb57720bSEthan Zhao if (!policy->governor) 2113cb57720bSEthan Zhao return -EINVAL; 21142f0aea93SViresh Kumar 21151c256245SThomas Renninger if (policy->governor->max_transition_latency && 21161c256245SThomas Renninger policy->cpuinfo.transition_latency > 21171c256245SThomas Renninger policy->governor->max_transition_latency) { 21186afde10cSThomas Renninger if (!gov) 21196afde10cSThomas Renninger return -EINVAL; 21206afde10cSThomas Renninger else { 2121e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 2122e837f9b5SJoe Perches policy->governor->name, gov->name); 21231c256245SThomas Renninger policy->governor = gov; 21241c256245SThomas Renninger } 21256afde10cSThomas Renninger } 21261da177e4SLinus Torvalds 2127fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 21281da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 21291da177e4SLinus Torvalds return -EINVAL; 21301da177e4SLinus Torvalds 21312d06d8c4SDominik Brodowski pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2132e08f5f5bSGautham R Shenoy policy->cpu, event); 213395731ebbSXiaoguang Chen 213495731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 213556d07db2SSrivatsa S. 
Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2136f73d3933SViresh Kumar || (!policy->governor_enabled 2137f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 213895731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 213995731ebbSXiaoguang Chen return -EBUSY; 214095731ebbSXiaoguang Chen } 214195731ebbSXiaoguang Chen 214295731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 214395731ebbSXiaoguang Chen policy->governor_enabled = false; 214495731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 214595731ebbSXiaoguang Chen policy->governor_enabled = true; 214695731ebbSXiaoguang Chen 214795731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 214895731ebbSXiaoguang Chen 21491da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 21501da177e4SLinus Torvalds 21514d5dcc42SViresh Kumar if (!ret) { 21524d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 21538e53695fSViresh Kumar policy->governor->initialized++; 21544d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 21558e53695fSViresh Kumar policy->governor->initialized--; 215695731ebbSXiaoguang Chen } else { 215795731ebbSXiaoguang Chen /* Restore original values */ 215895731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 215995731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 216095731ebbSXiaoguang Chen policy->governor_enabled = true; 216195731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 216295731ebbSXiaoguang Chen policy->governor_enabled = false; 216395731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 21644d5dcc42SViresh Kumar } 2165b394058fSViresh Kumar 2166fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2167fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 21681da177e4SLinus Torvalds module_put(policy->governor->owner); 21691da177e4SLinus Torvalds 21701da177e4SLinus Torvalds return ret; 21711da177e4SLinus Torvalds } 21721da177e4SLinus Torvalds 21731da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 21741da177e4SLinus Torvalds { 21753bcb09a3SJeremy Fitzhardinge int err; 21761da177e4SLinus Torvalds 21771da177e4SLinus Torvalds if (!governor) 21781da177e4SLinus Torvalds return -EINVAL; 21791da177e4SLinus Torvalds 2180a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2181a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2182a7b422cdSKonrad Rzeszutek Wilk 21833fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21841da177e4SLinus Torvalds 2185b394058fSViresh Kumar governor->initialized = 0; 21863bcb09a3SJeremy Fitzhardinge err = -EBUSY; 218742f91fa1SViresh Kumar if (!find_governor(governor->name)) { 21883bcb09a3SJeremy Fitzhardinge err = 0; 21891da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 21903bcb09a3SJeremy Fitzhardinge } 21911da177e4SLinus Torvalds 21923fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21933bcb09a3SJeremy Fitzhardinge return err; 21941da177e4SLinus Torvalds } 21951da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 21961da177e4SLinus Torvalds 21971da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 21981da177e4SLinus Torvalds { 21994573237bSViresh Kumar struct cpufreq_policy *policy; 22004573237bSViresh Kumar unsigned long flags; 220190e41bacSPrarit Bhargava 22021da177e4SLinus Torvalds if (!governor) 22031da177e4SLinus Torvalds return; 22041da177e4SLinus Torvalds 2205a7b422cdSKonrad Rzeszutek 
Wilk if (cpufreq_disabled()) 2206a7b422cdSKonrad Rzeszutek Wilk return; 2207a7b422cdSKonrad Rzeszutek Wilk 22084573237bSViresh Kumar /* clear last_governor for all inactive policies */ 22094573237bSViresh Kumar read_lock_irqsave(&cpufreq_driver_lock, flags); 22104573237bSViresh Kumar for_each_inactive_policy(policy) { 221118bf3a12SViresh Kumar if (!strcmp(policy->last_governor, governor->name)) { 221218bf3a12SViresh Kumar policy->governor = NULL; 22134573237bSViresh Kumar strcpy(policy->last_governor, "\0"); 221490e41bacSPrarit Bhargava } 221518bf3a12SViresh Kumar } 22164573237bSViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 221790e41bacSPrarit Bhargava 22183fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 22191da177e4SLinus Torvalds list_del(&governor->governor_list); 22203fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 22211da177e4SLinus Torvalds return; 22221da177e4SLinus Torvalds } 22231da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 22241da177e4SLinus Torvalds 22251da177e4SLinus Torvalds 22261da177e4SLinus Torvalds /********************************************************************* 22271da177e4SLinus Torvalds * POLICY INTERFACE * 22281da177e4SLinus Torvalds *********************************************************************/ 22291da177e4SLinus Torvalds 22301da177e4SLinus Torvalds /** 22311da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 223229464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 223329464f28SDave Jones * is written 22341da177e4SLinus Torvalds * 22351da177e4SLinus Torvalds * Reads the current cpufreq policy. 22361da177e4SLinus Torvalds */ 22371da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 22381da177e4SLinus Torvalds { 22391da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 22401da177e4SLinus Torvalds if (!policy) 22411da177e4SLinus Torvalds return -EINVAL; 22421da177e4SLinus Torvalds 22431da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 22441da177e4SLinus Torvalds if (!cpu_policy) 22451da177e4SLinus Torvalds return -EINVAL; 22461da177e4SLinus Torvalds 2247d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 22481da177e4SLinus Torvalds 22491da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 22501da177e4SLinus Torvalds return 0; 22511da177e4SLinus Torvalds } 22521da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 22531da177e4SLinus Torvalds 2254153d7f3fSArjan van de Ven /* 2255037ce839SViresh Kumar * policy : current policy. 2256037ce839SViresh Kumar * new_policy: policy to be set. 2257153d7f3fSArjan van de Ven */ 2258037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 22593a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 22601da177e4SLinus Torvalds { 2261d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2262d9a789c7SRafael J. Wysocki int ret; 22631da177e4SLinus Torvalds 2264e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2265e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 22661da177e4SLinus Torvalds 2267d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 22681da177e4SLinus Torvalds 2269d9a789c7SRafael J. Wysocki if (new_policy->min > policy->max || new_policy->max < policy->min) 2270d9a789c7SRafael J. 
Wysocki return -EINVAL; 22719c9a43edSMattia Dongili 22721da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 22733a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 22741da177e4SLinus Torvalds if (ret) 2275d9a789c7SRafael J. Wysocki return ret; 22761da177e4SLinus Torvalds 22771da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2278e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22793a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 22801da177e4SLinus Torvalds 22811da177e4SLinus Torvalds /* adjust if necessary - hardware incompatibility*/ 2282e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22833a3e9e06SViresh Kumar CPUFREQ_INCOMPATIBLE, new_policy); 22841da177e4SLinus Torvalds 2285bb176f7dSViresh Kumar /* 2286bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2287bb176f7dSViresh Kumar * different to the first one 2288bb176f7dSViresh Kumar */ 22893a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2290e041c683SAlan Stern if (ret) 2291d9a789c7SRafael J. Wysocki return ret; 22921da177e4SLinus Torvalds 22931da177e4SLinus Torvalds /* notification of the new policy */ 2294e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22953a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 22961da177e4SLinus Torvalds 22973a3e9e06SViresh Kumar policy->min = new_policy->min; 22983a3e9e06SViresh Kumar policy->max = new_policy->max; 22991da177e4SLinus Torvalds 23002d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 23013a3e9e06SViresh Kumar policy->min, policy->max); 23021da177e4SLinus Torvalds 23031c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 23043a3e9e06SViresh Kumar policy->policy = new_policy->policy; 23052d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2306d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2307d9a789c7SRafael J. Wysocki } 2308d9a789c7SRafael J. Wysocki 2309d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2310d9a789c7SRafael J. Wysocki goto out; 23111da177e4SLinus Torvalds 23122d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 23131da177e4SLinus Torvalds 2314d9a789c7SRafael J. Wysocki /* save old, working values */ 2315d9a789c7SRafael J. Wysocki old_gov = policy->governor; 23161da177e4SLinus Torvalds /* end old governor */ 2317d9a789c7SRafael J. Wysocki if (old_gov) { 23183a3e9e06SViresh Kumar __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 2319ad7722daSviresh kumar up_write(&policy->rwsem); 2320d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2321ad7722daSviresh kumar down_write(&policy->rwsem); 23227bd353a9SViresh Kumar } 23231da177e4SLinus Torvalds 23241da177e4SLinus Torvalds /* start new governor */ 23253a3e9e06SViresh Kumar policy->governor = new_policy->governor; 23263a3e9e06SViresh Kumar if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { 2327d9a789c7SRafael J. Wysocki if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) 2328d9a789c7SRafael J. Wysocki goto out; 2329d9a789c7SRafael J. Wysocki 2330ad7722daSviresh kumar up_write(&policy->rwsem); 2331d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2332ad7722daSviresh kumar down_write(&policy->rwsem); 2333955ef483SViresh Kumar } 23347bd353a9SViresh Kumar 23351da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2336d9a789c7SRafael J. 
Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 23371da177e4SLinus Torvalds if (old_gov) { 23383a3e9e06SViresh Kumar policy->governor = old_gov; 2339d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 2340d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 23411da177e4SLinus Torvalds } 23421da177e4SLinus Torvalds 2343d9a789c7SRafael J. Wysocki return -EINVAL; 2344d9a789c7SRafael J. Wysocki 2345d9a789c7SRafael J. Wysocki out: 2346d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2347d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 23481da177e4SLinus Torvalds } 23491da177e4SLinus Torvalds 23501da177e4SLinus Torvalds /** 23511da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 23521da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 23531da177e4SLinus Torvalds * 235425985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 23551da177e4SLinus Torvalds * at different times. 23561da177e4SLinus Torvalds */ 23571da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 23581da177e4SLinus Torvalds { 23593a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 23603a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2361f1829e4aSJulia Lawall int ret; 23621da177e4SLinus Torvalds 2363fefa8ff8SAaron Plattner if (!policy) 2364fefa8ff8SAaron Plattner return -ENODEV; 23651da177e4SLinus Torvalds 2366ad7722daSviresh kumar down_write(&policy->rwsem); 23671da177e4SLinus Torvalds 23682d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2369d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 23703a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 23713a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 23723a3e9e06SViresh Kumar new_policy.policy = policy->user_policy.policy; 23733a3e9e06SViresh Kumar new_policy.governor = policy->user_policy.governor; 23741da177e4SLinus Torvalds 2375bb176f7dSViresh Kumar /* 2376bb176f7dSViresh Kumar * BIOS might change freq behind our back 2377bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2378bb176f7dSViresh Kumar */ 23792ed99e39SRafael J. 
Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 23803a3e9e06SViresh Kumar new_policy.cur = cpufreq_driver->get(cpu); 2381bd0fa9bbSViresh Kumar if (WARN_ON(!new_policy.cur)) { 2382bd0fa9bbSViresh Kumar ret = -EIO; 2383fefa8ff8SAaron Plattner goto unlock; 2384bd0fa9bbSViresh Kumar } 2385bd0fa9bbSViresh Kumar 23863a3e9e06SViresh Kumar if (!policy->cur) { 2387e837f9b5SJoe Perches pr_debug("Driver did not initialize current freq\n"); 23883a3e9e06SViresh Kumar policy->cur = new_policy.cur; 2389a85f7bd3SThomas Renninger } else { 23909c0ebcf7SViresh Kumar if (policy->cur != new_policy.cur && has_target()) 2391a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, new_policy.cur); 23920961dd0dSThomas Renninger } 2393a85f7bd3SThomas Renninger } 23940961dd0dSThomas Renninger 2395037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 23961da177e4SLinus Torvalds 2397fefa8ff8SAaron Plattner unlock: 2398ad7722daSviresh kumar up_write(&policy->rwsem); 23995a01f2e8SVenkatesh Pallipadi 24003a3e9e06SViresh Kumar cpufreq_cpu_put(policy); 24011da177e4SLinus Torvalds return ret; 24021da177e4SLinus Torvalds } 24031da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy); 24041da177e4SLinus Torvalds 24052760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb, 2406c32b6b8eSAshok Raj unsigned long action, void *hcpu) 2407c32b6b8eSAshok Raj { 2408c32b6b8eSAshok Raj unsigned int cpu = (unsigned long)hcpu; 24098a25a2fdSKay Sievers struct device *dev; 2410c32b6b8eSAshok Raj 24118a25a2fdSKay Sievers dev = get_cpu_device(cpu); 24128a25a2fdSKay Sievers if (dev) { 24135302c3fbSSrivatsa S. Bhat switch (action & ~CPU_TASKS_FROZEN) { 2414c32b6b8eSAshok Raj case CPU_ONLINE: 241523faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2416c32b6b8eSAshok Raj break; 24175302c3fbSSrivatsa S. Bhat 2418c32b6b8eSAshok Raj case CPU_DOWN_PREPARE: 241996bbbe4aSViresh Kumar __cpufreq_remove_dev_prepare(dev, NULL); 24201aee40acSSrivatsa S. Bhat break; 24211aee40acSSrivatsa S. Bhat 24221aee40acSSrivatsa S. Bhat case CPU_POST_DEAD: 242396bbbe4aSViresh Kumar __cpufreq_remove_dev_finish(dev, NULL); 2424c32b6b8eSAshok Raj break; 24255302c3fbSSrivatsa S. 
Bhat 24265a01f2e8SVenkatesh Pallipadi case CPU_DOWN_FAILED: 242723faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2428c32b6b8eSAshok Raj break; 2429c32b6b8eSAshok Raj } 2430c32b6b8eSAshok Raj } 2431c32b6b8eSAshok Raj return NOTIFY_OK; 2432c32b6b8eSAshok Raj } 2433c32b6b8eSAshok Raj 24349c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = { 2435c32b6b8eSAshok Raj .notifier_call = cpufreq_cpu_callback, 2436c32b6b8eSAshok Raj }; 24371da177e4SLinus Torvalds 24381da177e4SLinus Torvalds /********************************************************************* 24396f19efc0SLukasz Majewski * BOOST * 24406f19efc0SLukasz Majewski *********************************************************************/ 24416f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state) 24426f19efc0SLukasz Majewski { 24436f19efc0SLukasz Majewski struct cpufreq_frequency_table *freq_table; 24446f19efc0SLukasz Majewski struct cpufreq_policy *policy; 24456f19efc0SLukasz Majewski int ret = -EINVAL; 24466f19efc0SLukasz Majewski 2447f963735aSViresh Kumar for_each_active_policy(policy) { 24486f19efc0SLukasz Majewski freq_table = cpufreq_frequency_get_table(policy->cpu); 24496f19efc0SLukasz Majewski if (freq_table) { 24506f19efc0SLukasz Majewski ret = cpufreq_frequency_table_cpuinfo(policy, 24516f19efc0SLukasz Majewski freq_table); 24526f19efc0SLukasz Majewski if (ret) { 24536f19efc0SLukasz Majewski pr_err("%s: Policy frequency update failed\n", 24546f19efc0SLukasz Majewski __func__); 24556f19efc0SLukasz Majewski break; 24566f19efc0SLukasz Majewski } 24576f19efc0SLukasz Majewski policy->user_policy.max = policy->max; 24586f19efc0SLukasz Majewski __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 24596f19efc0SLukasz Majewski } 24606f19efc0SLukasz Majewski } 24616f19efc0SLukasz Majewski 24626f19efc0SLukasz Majewski return ret; 24636f19efc0SLukasz Majewski } 24646f19efc0SLukasz Majewski 24656f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state) 24666f19efc0SLukasz Majewski { 24676f19efc0SLukasz Majewski unsigned long flags; 24686f19efc0SLukasz Majewski int ret = 0; 24696f19efc0SLukasz Majewski 24706f19efc0SLukasz Majewski if (cpufreq_driver->boost_enabled == state) 24716f19efc0SLukasz Majewski return 0; 24726f19efc0SLukasz Majewski 24736f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24746f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = state; 24756f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24766f19efc0SLukasz Majewski 24776f19efc0SLukasz Majewski ret = cpufreq_driver->set_boost(state); 24786f19efc0SLukasz Majewski if (ret) { 24796f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24806f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = !state; 24816f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24826f19efc0SLukasz Majewski 2483e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST\n", 2484e837f9b5SJoe Perches __func__, state ? 
"enable" : "disable"); 24856f19efc0SLukasz Majewski } 24866f19efc0SLukasz Majewski 24876f19efc0SLukasz Majewski return ret; 24886f19efc0SLukasz Majewski } 24896f19efc0SLukasz Majewski 24906f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 24916f19efc0SLukasz Majewski { 24926f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 24936f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 24946f19efc0SLukasz Majewski 24956f19efc0SLukasz Majewski return 0; 24966f19efc0SLukasz Majewski } 24976f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 24986f19efc0SLukasz Majewski 24996f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 25006f19efc0SLukasz Majewski { 25016f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 25026f19efc0SLukasz Majewski } 25036f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 25046f19efc0SLukasz Majewski 25056f19efc0SLukasz Majewski /********************************************************************* 25061da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 25071da177e4SLinus Torvalds *********************************************************************/ 25081da177e4SLinus Torvalds 25091da177e4SLinus Torvalds /** 25101da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 25111da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 25121da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 25131da177e4SLinus Torvalds * 25141da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 25151da177e4SLinus Torvalds * returns zero on success, -EBUSY when another driver got here first 25161da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 25171da177e4SLinus Torvalds * 25181da177e4SLinus Torvalds */ 2519221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 25201da177e4SLinus Torvalds { 25211da177e4SLinus Torvalds unsigned long flags; 25221da177e4SLinus Torvalds int ret; 25231da177e4SLinus Torvalds 2524a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2525a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2526a7b422cdSKonrad Rzeszutek Wilk 25271da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 25289c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 25299832235fSRafael J. Wysocki driver_data->target) || 25309832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 25311c03a2d0SViresh Kumar driver_data->target)) || 25321c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 25331da177e4SLinus Torvalds return -EINVAL; 25341da177e4SLinus Torvalds 25352d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 25361da177e4SLinus Torvalds 25370d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25381c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 25390d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25404dea5806SYinghai Lu return -EEXIST; 25411da177e4SLinus Torvalds } 25421c3d85ddSRafael J. 
Wysocki cpufreq_driver = driver_data; 25430d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25441da177e4SLinus Torvalds 2545bc68b7dfSViresh Kumar if (driver_data->setpolicy) 2546bc68b7dfSViresh Kumar driver_data->flags |= CPUFREQ_CONST_LOOPS; 2547bc68b7dfSViresh Kumar 25486f19efc0SLukasz Majewski if (cpufreq_boost_supported()) { 25496f19efc0SLukasz Majewski /* 25506f19efc0SLukasz Majewski * Check if driver provides function to enable boost - 25516f19efc0SLukasz Majewski * if not, use cpufreq_boost_set_sw as default 25526f19efc0SLukasz Majewski */ 25536f19efc0SLukasz Majewski if (!cpufreq_driver->set_boost) 25546f19efc0SLukasz Majewski cpufreq_driver->set_boost = cpufreq_boost_set_sw; 25556f19efc0SLukasz Majewski 25566f19efc0SLukasz Majewski ret = cpufreq_sysfs_create_file(&boost.attr); 25576f19efc0SLukasz Majewski if (ret) { 25586f19efc0SLukasz Majewski pr_err("%s: cannot register global BOOST sysfs file\n", 25596f19efc0SLukasz Majewski __func__); 25606f19efc0SLukasz Majewski goto err_null_driver; 25616f19efc0SLukasz Majewski } 25626f19efc0SLukasz Majewski } 25636f19efc0SLukasz Majewski 25648a25a2fdSKay Sievers ret = subsys_interface_register(&cpufreq_interface); 25658f5bc2abSJiri Slaby if (ret) 25666f19efc0SLukasz Majewski goto err_boost_unreg; 25671da177e4SLinus Torvalds 2568ce1bcfe9SViresh Kumar if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2569ce1bcfe9SViresh Kumar list_empty(&cpufreq_policy_list)) { 25701da177e4SLinus Torvalds /* if all ->init() calls failed, unregister */ 2571ce1bcfe9SViresh Kumar pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2572e08f5f5bSGautham R Shenoy driver_data->name); 25738a25a2fdSKay Sievers goto err_if_unreg; 25741da177e4SLinus Torvalds } 25751da177e4SLinus Torvalds 257665edc68cSChandra Seetharaman register_hotcpu_notifier(&cpufreq_cpu_notifier); 25772d06d8c4SDominik Brodowski pr_debug("driver %s up and running\n", driver_data->name); 25781da177e4SLinus Torvalds 25798f5bc2abSJiri Slaby return 0; 25808a25a2fdSKay Sievers err_if_unreg: 25818a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25826f19efc0SLukasz Majewski err_boost_unreg: 25836f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25846f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25858f5bc2abSJiri Slaby err_null_driver: 25860d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25871c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25880d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25894d34a67dSDave Jones return ret; 25901da177e4SLinus Torvalds } 25911da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver); 25921da177e4SLinus Torvalds 25931da177e4SLinus Torvalds /** 25941da177e4SLinus Torvalds * cpufreq_unregister_driver - unregister the current CPUFreq driver 25951da177e4SLinus Torvalds * 25961da177e4SLinus Torvalds * Unregister the current CPUFreq driver. Only call this if you have 25971da177e4SLinus Torvalds * the right to do so, i.e. if you have succeeded in initialising before! 25981da177e4SLinus Torvalds * Returns zero if successful, and -EINVAL if the cpufreq_driver is 25991da177e4SLinus Torvalds * currently not initialised. 26001da177e4SLinus Torvalds */ 2601221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver) 26021da177e4SLinus Torvalds { 26031da177e4SLinus Torvalds unsigned long flags; 26041da177e4SLinus Torvalds 26051c3d85ddSRafael J. 
Wysocki if (!cpufreq_driver || (driver != cpufreq_driver)) 26061da177e4SLinus Torvalds return -EINVAL; 26071da177e4SLinus Torvalds 26082d06d8c4SDominik Brodowski pr_debug("unregistering driver %s\n", driver->name); 26091da177e4SLinus Torvalds 26108a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 26116f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 26126f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 26136f19efc0SLukasz Majewski 261465edc68cSChandra Seetharaman unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 26151da177e4SLinus Torvalds 26166eed9404SViresh Kumar down_write(&cpufreq_rwsem); 26170d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 26186eed9404SViresh Kumar 26191c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 26206eed9404SViresh Kumar 26210d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 26226eed9404SViresh Kumar up_write(&cpufreq_rwsem); 26231da177e4SLinus Torvalds 26241da177e4SLinus Torvalds return 0; 26251da177e4SLinus Torvalds } 26261da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 26275a01f2e8SVenkatesh Pallipadi 262890de2a4aSDoug Anderson /* 262990de2a4aSDoug Anderson * Stop cpufreq at shutdown to make sure it isn't holding any locks 263090de2a4aSDoug Anderson * or mutexes when secondary CPUs are halted. 263190de2a4aSDoug Anderson */ 263290de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 263390de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 263490de2a4aSDoug Anderson }; 263590de2a4aSDoug Anderson 26365a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 26375a01f2e8SVenkatesh Pallipadi { 2638a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2639a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2640a7b422cdSKonrad Rzeszutek Wilk 26412361be23SViresh Kumar cpufreq_global_kobject = kobject_create(); 26428aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 26438aa84ad8SThomas Renninger 264490de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 264590de2a4aSDoug Anderson 26465a01f2e8SVenkatesh Pallipadi return 0; 26475a01f2e8SVenkatesh Pallipadi } 26485a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2649
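
/*
 * Editor's illustration, not part of the original cpufreq.c (and therefore
 * carrying no blame annotations): the smallest plausible governor one could
 * hand to cpufreq_register_governor() above.  It pins each policy at its
 * maximum frequency on START/LIMITS, mirroring the in-tree performance
 * governor; the "example_gov" name is made up.  A real governor normally
 * lives in its own module so the ->owner / module_put() accounting in
 * __cpufreq_governor() works as intended.
 */
#include <linux/cpufreq.h>
#include <linux/module.h>

static int example_gov_handler(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* Jump to the highest frequency the current limits allow. */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example_gov",
	.governor	= example_gov_handler,
	.owner		= THIS_MODULE,
};

static int __init example_gov_init(void)
{
	/* Returns -EBUSY if a governor with this name is already listed. */
	return cpufreq_register_governor(&example_governor);
}
module_init(example_gov_init);

static void __exit example_gov_exit(void)
{
	cpufreq_unregister_governor(&example_governor);
}
module_exit(example_gov_exit);

MODULE_LICENSE("GPL");
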
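/*
 * Editor's illustration, not part of the original cpufreq.c: how other kernel
 * code might use cpufreq_get_policy() and cpufreq_update_policy() above.  The
 * function name and message text are hypothetical; the point is only that
 * cpufreq_get_policy() copies the policy out by value, and
 * cpufreq_update_policy() re-applies the user limits (useful when firmware
 * may have changed the frequency behind the kernel's back).
 */
#include <linux/cpufreq.h>
#include <linux/printk.h>

static void example_show_and_refresh_policy(unsigned int cpu)
{
	struct cpufreq_policy policy;

	/* Returns -EINVAL if @cpu currently has no cpufreq policy. */
	if (cpufreq_get_policy(&policy, cpu))
		return;

	/* ->governor may be NULL for drivers that implement ->setpolicy. */
	pr_info("cpu%u: %u..%u kHz, governor %s\n", cpu, policy.min,
		policy.max, policy.governor ? policy.governor->name : "none");

	/* Ask the core to re-evaluate user_policy limits and notify governors. */
	cpufreq_update_policy(cpu);
}
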
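/*
 * Editor's illustration, not part of the original cpufreq.c: a hypothetical
 * driver-specific ->set_boost() callback of the shape consumed by
 * cpufreq_boost_trigger_state() above.  Drivers that mark boost_supported
 * but leave ->set_boost NULL get cpufreq_boost_set_sw() instead, which
 * re-applies each active policy's frequency-table limits.
 */
#include <linux/printk.h>

static int example_set_boost(int state)
{
	/*
	 * Poke the (hypothetical) firmware or hardware knob here.  Returning
	 * non-zero makes cpufreq_boost_trigger_state() roll boost_enabled
	 * back to its previous value.
	 */
	pr_debug("example: %s boost frequencies\n",
		 state ? "enabling" : "disabling");
	return 0;
}
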
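/*
 * Editor's illustration, not part of the original cpufreq.c: a minimal sketch
 * of a platform driver that would pass the argument checks at the top of
 * cpufreq_register_driver() above (->init and ->verify present, exactly one
 * of ->setpolicy or ->target/->target_index, and get_intermediate and
 * target_intermediate either both set or both unset).  All names, frequency
 * values and the 100 us latency are hypothetical; a real driver would live
 * in its own file and program real clock hardware.
 */
#include <linux/cpufreq.h>
#include <linux/module.h>

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Program the (hypothetical) CPU clock to example_freq_table[index]. */
	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/*
	 * Validate and publish the table; 100000 ns transition latency.
	 * cpufreq_generic_init() assumes all CPUs share one clock.
	 */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.init		= example_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.attr		= cpufreq_generic_attr,
	/*
	 * Setting .boost_supported here without .set_boost would make
	 * cpufreq_register_driver() fall back to cpufreq_boost_set_sw().
	 */
};

static int __init example_cpufreq_init(void)
{
	/* -EEXIST if another cpufreq driver is already registered. */
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_init);

static void __exit example_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_exit);

MODULE_LICENSE("GPL");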