11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/drivers/cpufreq/cpufreq.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001 Russell King 51da177e4SLinus Torvalds * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 6bb176f7dSViresh Kumar * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org> 71da177e4SLinus Torvalds * 8c32b6b8eSAshok Raj * Oct 2005 - Ashok Raj <ashok.raj@intel.com> 9c32b6b8eSAshok Raj * Added handling for CPU hotplug 108ff69732SDave Jones * Feb 2006 - Jacob Shin <jacob.shin@amd.com> 118ff69732SDave Jones * Fix handling for CPU hotplug -- affected CPUs 12c32b6b8eSAshok Raj * 131da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 141da177e4SLinus Torvalds * it under the terms of the GNU General Public License version 2 as 151da177e4SLinus Torvalds * published by the Free Software Foundation. 161da177e4SLinus Torvalds */ 171da177e4SLinus Torvalds 18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19db701151SViresh Kumar 205ff0a268SViresh Kumar #include <linux/cpu.h> 211da177e4SLinus Torvalds #include <linux/cpufreq.h> 221da177e4SLinus Torvalds #include <linux/delay.h> 231da177e4SLinus Torvalds #include <linux/device.h> 245ff0a268SViresh Kumar #include <linux/init.h> 255ff0a268SViresh Kumar #include <linux/kernel_stat.h> 265ff0a268SViresh Kumar #include <linux/module.h> 273fc54d37Sakpm@osdl.org #include <linux/mutex.h> 285ff0a268SViresh Kumar #include <linux/slab.h> 292f0aea93SViresh Kumar #include <linux/suspend.h> 3090de2a4aSDoug Anderson #include <linux/syscore_ops.h> 315ff0a268SViresh Kumar #include <linux/tick.h> 326f4f2723SThomas Renninger #include <trace/events/power.h> 336f4f2723SThomas Renninger 34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list); 35f963735aSViresh Kumar 36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy) 37f963735aSViresh Kumar { 38f963735aSViresh Kumar return cpumask_empty(policy->cpus); 39f963735aSViresh Kumar } 40f963735aSViresh Kumar 41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active) 42f963735aSViresh Kumar { 43f963735aSViresh Kumar return active == !policy_is_inactive(policy); 44f963735aSViresh Kumar } 45f963735aSViresh Kumar 46f963735aSViresh Kumar /* Finds Next Acive/Inactive policy */ 47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy, 48f963735aSViresh Kumar bool active) 49f963735aSViresh Kumar { 50f963735aSViresh Kumar do { 51f963735aSViresh Kumar policy = list_next_entry(policy, policy_list); 52f963735aSViresh Kumar 53f963735aSViresh Kumar /* No more policies in the list */ 54f963735aSViresh Kumar if (&policy->policy_list == &cpufreq_policy_list) 55f963735aSViresh Kumar return NULL; 56f963735aSViresh Kumar } while (!suitable_policy(policy, active)); 57f963735aSViresh Kumar 58f963735aSViresh Kumar return policy; 59f963735aSViresh Kumar } 60f963735aSViresh Kumar 61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active) 62f963735aSViresh Kumar { 63f963735aSViresh Kumar struct cpufreq_policy *policy; 64f963735aSViresh Kumar 65f963735aSViresh Kumar /* No policies in the list */ 66f963735aSViresh Kumar if (list_empty(&cpufreq_policy_list)) 67f963735aSViresh Kumar return NULL; 68f963735aSViresh Kumar 69f963735aSViresh Kumar policy = list_first_entry(&cpufreq_policy_list, typeof(*policy), 70f963735aSViresh Kumar policy_list); 71f963735aSViresh Kumar 72f963735aSViresh Kumar if (!suitable_policy(policy, active)) 73f963735aSViresh Kumar policy = next_policy(policy, active); 74f963735aSViresh Kumar 75f963735aSViresh Kumar return policy; 76f963735aSViresh Kumar } 77f963735aSViresh Kumar 78f963735aSViresh Kumar /* Macros to iterate over CPU policies */ 79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active) \ 80f963735aSViresh Kumar for (__policy = first_policy(__active); \ 81f963735aSViresh Kumar __policy; \ 82f963735aSViresh Kumar __policy = next_policy(__policy, __active)) 83f963735aSViresh Kumar 84f963735aSViresh Kumar #define for_each_active_policy(__policy) \ 85f963735aSViresh Kumar for_each_suitable_policy(__policy, true) 86f963735aSViresh Kumar #define for_each_inactive_policy(__policy) \ 87f963735aSViresh Kumar for_each_suitable_policy(__policy, false) 88f963735aSViresh Kumar 89b4f0676fSViresh Kumar #define for_each_policy(__policy) \ 90b4f0676fSViresh Kumar list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) 91b4f0676fSViresh Kumar 92f7b27061SViresh Kumar /* Iterate over governors */ 93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list); 94f7b27061SViresh Kumar #define for_each_governor(__governor) \ 95f7b27061SViresh Kumar list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) 96f7b27061SViresh Kumar 971da177e4SLinus Torvalds /** 98cd878479SDave Jones * The "cpufreq driver" - the arch- or hardware-dependent low 991da177e4SLinus Torvalds * level driver of CPUFreq support, and its spinlock. This lock 1001da177e4SLinus Torvalds * also protects the cpufreq_cpu_data array. 1011da177e4SLinus Torvalds */ 1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver; 1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock); 1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock); 106bb176f7dSViresh Kumar 1072f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */ 1082f0aea93SViresh Kumar static bool cpufreq_suspended; 1091da177e4SLinus Torvalds 1109c0ebcf7SViresh Kumar static inline bool has_target(void) 1119c0ebcf7SViresh Kumar { 1129c0ebcf7SViresh Kumar return cpufreq_driver->target_index || cpufreq_driver->target; 1139c0ebcf7SViresh Kumar } 1149c0ebcf7SViresh Kumar 1155a01f2e8SVenkatesh Pallipadi /* 1166eed9404SViresh Kumar * rwsem to guarantee that cpufreq driver module doesn't unload during critical 1176eed9404SViresh Kumar * sections 1186eed9404SViresh Kumar */ 1196eed9404SViresh Kumar static DECLARE_RWSEM(cpufreq_rwsem); 1206eed9404SViresh Kumar 1211da177e4SLinus Torvalds /* internal prototypes */ 12229464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy, 12329464f28SDave Jones unsigned int event); 124d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 12565f27f38SDavid Howells static void handle_update(struct work_struct *work); 1261da177e4SLinus Torvalds 1271da177e4SLinus Torvalds /** 1281da177e4SLinus Torvalds * Two notifier lists: the "policy" list is involved in the 1291da177e4SLinus Torvalds * validation process for a new CPU frequency policy; the 1301da177e4SLinus Torvalds * "transition" list for kernel code that needs to handle 1311da177e4SLinus Torvalds * changes to devices when the CPU clock speed changes. 1321da177e4SLinus Torvalds * The mutex locks both lists. 1331da177e4SLinus Torvalds */ 134e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); 135b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list; 1361da177e4SLinus Torvalds 13774212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called; 138b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void) 139b4dfdbb3SAlan Stern { 140b4dfdbb3SAlan Stern srcu_init_notifier_head(&cpufreq_transition_notifier_list); 14174212ca4SCesar Eduardo Barros init_cpufreq_transition_notifier_list_called = true; 142b4dfdbb3SAlan Stern return 0; 143b4dfdbb3SAlan Stern } 144b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list); 1451da177e4SLinus Torvalds 146a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly; 147da584455SViresh Kumar static int cpufreq_disabled(void) 148a7b422cdSKonrad Rzeszutek Wilk { 149a7b422cdSKonrad Rzeszutek Wilk return off; 150a7b422cdSKonrad Rzeszutek Wilk } 151a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void) 152a7b422cdSKonrad Rzeszutek Wilk { 153a7b422cdSKonrad Rzeszutek Wilk off = 1; 154a7b422cdSKonrad Rzeszutek Wilk } 1553fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex); 1561da177e4SLinus Torvalds 1574d5dcc42SViresh Kumar bool have_governor_per_policy(void) 1584d5dcc42SViresh Kumar { 1590b981e70SViresh Kumar return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); 1604d5dcc42SViresh Kumar } 1613f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy); 1624d5dcc42SViresh Kumar 163944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) 164944e9a03SViresh Kumar { 165944e9a03SViresh Kumar if (have_governor_per_policy()) 166944e9a03SViresh Kumar return &policy->kobj; 167944e9a03SViresh Kumar else 168944e9a03SViresh Kumar return cpufreq_global_kobject; 169944e9a03SViresh Kumar } 170944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 171944e9a03SViresh Kumar 1725a31d594SViresh Kumar struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 1735a31d594SViresh Kumar { 1745a31d594SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1755a31d594SViresh Kumar 1765a31d594SViresh Kumar return policy && !policy_is_inactive(policy) ? 1775a31d594SViresh Kumar policy->freq_table : NULL; 1785a31d594SViresh Kumar } 1795a31d594SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 1805a31d594SViresh Kumar 18172a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 18272a4ce34SViresh Kumar { 18372a4ce34SViresh Kumar u64 idle_time; 18472a4ce34SViresh Kumar u64 cur_wall_time; 18572a4ce34SViresh Kumar u64 busy_time; 18672a4ce34SViresh Kumar 18772a4ce34SViresh Kumar cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); 18872a4ce34SViresh Kumar 18972a4ce34SViresh Kumar busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; 19072a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; 19172a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; 19272a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; 19372a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; 19472a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; 19572a4ce34SViresh Kumar 19672a4ce34SViresh Kumar idle_time = cur_wall_time - busy_time; 19772a4ce34SViresh Kumar if (wall) 19872a4ce34SViresh Kumar *wall = cputime_to_usecs(cur_wall_time); 19972a4ce34SViresh Kumar 20072a4ce34SViresh Kumar return cputime_to_usecs(idle_time); 20172a4ce34SViresh Kumar } 20272a4ce34SViresh Kumar 20372a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) 20472a4ce34SViresh Kumar { 20572a4ce34SViresh Kumar u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); 20672a4ce34SViresh Kumar 20772a4ce34SViresh Kumar if (idle_time == -1ULL) 20872a4ce34SViresh Kumar return get_cpu_idle_time_jiffy(cpu, wall); 20972a4ce34SViresh Kumar else if (!io_busy) 21072a4ce34SViresh Kumar idle_time += get_cpu_iowait_time_us(cpu, wall); 21172a4ce34SViresh Kumar 21272a4ce34SViresh Kumar return idle_time; 21372a4ce34SViresh Kumar } 21472a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time); 21572a4ce34SViresh Kumar 21670e9e778SViresh Kumar /* 21770e9e778SViresh Kumar * This is a generic cpufreq init() routine which can be used by cpufreq 21870e9e778SViresh Kumar * drivers of SMP systems. It will do following: 21970e9e778SViresh Kumar * - validate & show freq table passed 22070e9e778SViresh Kumar * - set policies transition latency 22170e9e778SViresh Kumar * - policy->cpus with all possible CPUs 22270e9e778SViresh Kumar */ 22370e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy, 22470e9e778SViresh Kumar struct cpufreq_frequency_table *table, 22570e9e778SViresh Kumar unsigned int transition_latency) 22670e9e778SViresh Kumar { 22770e9e778SViresh Kumar int ret; 22870e9e778SViresh Kumar 22970e9e778SViresh Kumar ret = cpufreq_table_validate_and_show(policy, table); 23070e9e778SViresh Kumar if (ret) { 23170e9e778SViresh Kumar pr_err("%s: invalid frequency table: %d\n", __func__, ret); 23270e9e778SViresh Kumar return ret; 23370e9e778SViresh Kumar } 23470e9e778SViresh Kumar 23570e9e778SViresh Kumar policy->cpuinfo.transition_latency = transition_latency; 23670e9e778SViresh Kumar 23770e9e778SViresh Kumar /* 23858405af6SShailendra Verma * The driver only supports the SMP configuration where all processors 23970e9e778SViresh Kumar * share the clock and voltage and clock. 24070e9e778SViresh Kumar */ 24170e9e778SViresh Kumar cpumask_setall(policy->cpus); 24270e9e778SViresh Kumar 24370e9e778SViresh Kumar return 0; 24470e9e778SViresh Kumar } 24570e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init); 24670e9e778SViresh Kumar 247988bed09SViresh Kumar /* Only for cpufreq core internal use */ 248988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) 249652ed95dSViresh Kumar { 250652ed95dSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 251652ed95dSViresh Kumar 252988bed09SViresh Kumar return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; 253988bed09SViresh Kumar } 254988bed09SViresh Kumar 255988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu) 256988bed09SViresh Kumar { 257988bed09SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); 258988bed09SViresh Kumar 259652ed95dSViresh Kumar if (!policy || IS_ERR(policy->clk)) { 260e837f9b5SJoe Perches pr_err("%s: No %s associated to cpu: %d\n", 261e837f9b5SJoe Perches __func__, policy ? "clk" : "policy", cpu); 262652ed95dSViresh Kumar return 0; 263652ed95dSViresh Kumar } 264652ed95dSViresh Kumar 265652ed95dSViresh Kumar return clk_get_rate(policy->clk) / 1000; 266652ed95dSViresh Kumar } 267652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get); 268652ed95dSViresh Kumar 26950e9c852SViresh Kumar /** 27050e9c852SViresh Kumar * cpufreq_cpu_get: returns policy for a cpu and marks it busy. 27150e9c852SViresh Kumar * 27250e9c852SViresh Kumar * @cpu: cpu to find policy for. 27350e9c852SViresh Kumar * 27450e9c852SViresh Kumar * This returns policy for 'cpu', returns NULL if it doesn't exist. 27550e9c852SViresh Kumar * It also increments the kobject reference count to mark it busy and so would 27650e9c852SViresh Kumar * require a corresponding call to cpufreq_cpu_put() to decrement it back. 27750e9c852SViresh Kumar * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be 27850e9c852SViresh Kumar * freed as that depends on the kobj count. 27950e9c852SViresh Kumar * 28050e9c852SViresh Kumar * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a 28150e9c852SViresh Kumar * valid policy is found. This is done to make sure the driver doesn't get 28250e9c852SViresh Kumar * unregistered while the policy is being used. 28350e9c852SViresh Kumar * 28450e9c852SViresh Kumar * Return: A valid policy on success, otherwise NULL on failure. 28550e9c852SViresh Kumar */ 2866eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 2871da177e4SLinus Torvalds { 2886eed9404SViresh Kumar struct cpufreq_policy *policy = NULL; 2891da177e4SLinus Torvalds unsigned long flags; 2901da177e4SLinus Torvalds 2911b947c90SViresh Kumar if (WARN_ON(cpu >= nr_cpu_ids)) 2926eed9404SViresh Kumar return NULL; 2936eed9404SViresh Kumar 2946eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 2956eed9404SViresh Kumar return NULL; 2961da177e4SLinus Torvalds 2971da177e4SLinus Torvalds /* get the cpufreq driver */ 2980d1857a1SNathan Zimmer read_lock_irqsave(&cpufreq_driver_lock, flags); 2991da177e4SLinus Torvalds 3006eed9404SViresh Kumar if (cpufreq_driver) { 3011da177e4SLinus Torvalds /* get the CPU */ 302988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 3036eed9404SViresh Kumar if (policy) 3046eed9404SViresh Kumar kobject_get(&policy->kobj); 3056eed9404SViresh Kumar } 3066eed9404SViresh Kumar 3076eed9404SViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 3081da177e4SLinus Torvalds 3093a3e9e06SViresh Kumar if (!policy) 3106eed9404SViresh Kumar up_read(&cpufreq_rwsem); 3111da177e4SLinus Torvalds 3123a3e9e06SViresh Kumar return policy; 313a9144436SStephen Boyd } 3141da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 3151da177e4SLinus Torvalds 31650e9c852SViresh Kumar /** 31750e9c852SViresh Kumar * cpufreq_cpu_put: Decrements the usage count of a policy 31850e9c852SViresh Kumar * 31950e9c852SViresh Kumar * @policy: policy earlier returned by cpufreq_cpu_get(). 32050e9c852SViresh Kumar * 32150e9c852SViresh Kumar * This decrements the kobject reference count incremented earlier by calling 32250e9c852SViresh Kumar * cpufreq_cpu_get(). 32350e9c852SViresh Kumar * 32450e9c852SViresh Kumar * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get(). 32550e9c852SViresh Kumar */ 3263a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy) 327a9144436SStephen Boyd { 3286eed9404SViresh Kumar kobject_put(&policy->kobj); 3296eed9404SViresh Kumar up_read(&cpufreq_rwsem); 330a9144436SStephen Boyd } 3311da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 3321da177e4SLinus Torvalds 3331da177e4SLinus Torvalds /********************************************************************* 3341da177e4SLinus Torvalds * EXTERNALLY AFFECTING FREQUENCY CHANGES * 3351da177e4SLinus Torvalds *********************************************************************/ 3361da177e4SLinus Torvalds 3371da177e4SLinus Torvalds /** 3381da177e4SLinus Torvalds * adjust_jiffies - adjust the system "loops_per_jiffy" 3391da177e4SLinus Torvalds * 3401da177e4SLinus Torvalds * This function alters the system "loops_per_jiffy" for the clock 3411da177e4SLinus Torvalds * speed change. Note that loops_per_jiffy cannot be updated on SMP 3421da177e4SLinus Torvalds * systems as each CPU might be scaled differently. So, use the arch 3431da177e4SLinus Torvalds * per-CPU loops_per_jiffy value wherever possible. 3441da177e4SLinus Torvalds */ 34539c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 34639c132eeSViresh Kumar { 3471da177e4SLinus Torvalds #ifndef CONFIG_SMP 3481da177e4SLinus Torvalds static unsigned long l_p_j_ref; 3491da177e4SLinus Torvalds static unsigned int l_p_j_ref_freq; 3501da177e4SLinus Torvalds 3511da177e4SLinus Torvalds if (ci->flags & CPUFREQ_CONST_LOOPS) 3521da177e4SLinus Torvalds return; 3531da177e4SLinus Torvalds 3541da177e4SLinus Torvalds if (!l_p_j_ref_freq) { 3551da177e4SLinus Torvalds l_p_j_ref = loops_per_jiffy; 3561da177e4SLinus Torvalds l_p_j_ref_freq = ci->old; 357e837f9b5SJoe Perches pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", 358e837f9b5SJoe Perches l_p_j_ref, l_p_j_ref_freq); 3591da177e4SLinus Torvalds } 3600b443eadSViresh Kumar if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { 361e08f5f5bSGautham R Shenoy loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, 362e08f5f5bSGautham R Shenoy ci->new); 363e837f9b5SJoe Perches pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", 364e837f9b5SJoe Perches loops_per_jiffy, ci->new); 3651da177e4SLinus Torvalds } 3661da177e4SLinus Torvalds #endif 36739c132eeSViresh Kumar } 3681da177e4SLinus Torvalds 3690956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy, 370b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 3711da177e4SLinus Torvalds { 3721da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 3731da177e4SLinus Torvalds 374d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 375d5aaffa9SDirk Brandewie return; 376d5aaffa9SDirk Brandewie 3771c3d85ddSRafael J. Wysocki freqs->flags = cpufreq_driver->flags; 3782d06d8c4SDominik Brodowski pr_debug("notification %u of frequency transition to %u kHz\n", 379e4472cb3SDave Jones state, freqs->new); 3801da177e4SLinus Torvalds 3811da177e4SLinus Torvalds switch (state) { 382e4472cb3SDave Jones 3831da177e4SLinus Torvalds case CPUFREQ_PRECHANGE: 384e4472cb3SDave Jones /* detect if the driver reported a value as "old frequency" 385e4472cb3SDave Jones * which is not equal to what the cpufreq core thinks is 386e4472cb3SDave Jones * "old frequency". 3871da177e4SLinus Torvalds */ 3881c3d85ddSRafael J. Wysocki if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 389e4472cb3SDave Jones if ((policy) && (policy->cpu == freqs->cpu) && 390e4472cb3SDave Jones (policy->cur) && (policy->cur != freqs->old)) { 391e837f9b5SJoe Perches pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", 392e4472cb3SDave Jones freqs->old, policy->cur); 393e4472cb3SDave Jones freqs->old = policy->cur; 3941da177e4SLinus Torvalds } 3951da177e4SLinus Torvalds } 396b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 397e4472cb3SDave Jones CPUFREQ_PRECHANGE, freqs); 3981da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 3991da177e4SLinus Torvalds break; 400e4472cb3SDave Jones 4011da177e4SLinus Torvalds case CPUFREQ_POSTCHANGE: 4021da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 403e837f9b5SJoe Perches pr_debug("FREQ: %lu - CPU: %lu\n", 404e837f9b5SJoe Perches (unsigned long)freqs->new, (unsigned long)freqs->cpu); 40525e41933SThomas Renninger trace_cpu_frequency(freqs->new, freqs->cpu); 406b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 407e4472cb3SDave Jones CPUFREQ_POSTCHANGE, freqs); 408e4472cb3SDave Jones if (likely(policy) && likely(policy->cpu == freqs->cpu)) 409e4472cb3SDave Jones policy->cur = freqs->new; 4101da177e4SLinus Torvalds break; 4111da177e4SLinus Torvalds } 4121da177e4SLinus Torvalds } 413bb176f7dSViresh Kumar 414b43a7ffbSViresh Kumar /** 415b43a7ffbSViresh Kumar * cpufreq_notify_transition - call notifier chain and adjust_jiffies 416b43a7ffbSViresh Kumar * on frequency transition. 417b43a7ffbSViresh Kumar * 418b43a7ffbSViresh Kumar * This function calls the transition notifiers and the "adjust_jiffies" 419b43a7ffbSViresh Kumar * function. It is called twice on all CPU frequency changes that have 420b43a7ffbSViresh Kumar * external effects. 421b43a7ffbSViresh Kumar */ 422236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy, 423b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 424b43a7ffbSViresh Kumar { 425b43a7ffbSViresh Kumar for_each_cpu(freqs->cpu, policy->cpus) 426b43a7ffbSViresh Kumar __cpufreq_notify_transition(policy, freqs, state); 427b43a7ffbSViresh Kumar } 4281da177e4SLinus Torvalds 429f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */ 430236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, 431f7ba3b41SViresh Kumar struct cpufreq_freqs *freqs, int transition_failed) 432f7ba3b41SViresh Kumar { 433f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 434f7ba3b41SViresh Kumar if (!transition_failed) 435f7ba3b41SViresh Kumar return; 436f7ba3b41SViresh Kumar 437f7ba3b41SViresh Kumar swap(freqs->old, freqs->new); 438f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 439f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 440f7ba3b41SViresh Kumar } 441f7ba3b41SViresh Kumar 44212478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, 44312478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs) 44412478cf0SSrivatsa S. Bhat { 445ca654dc3SSrivatsa S. Bhat 446ca654dc3SSrivatsa S. Bhat /* 447ca654dc3SSrivatsa S. Bhat * Catch double invocations of _begin() which lead to self-deadlock. 448ca654dc3SSrivatsa S. Bhat * ASYNC_NOTIFICATION drivers are left out because the cpufreq core 449ca654dc3SSrivatsa S. Bhat * doesn't invoke _begin() on their behalf, and hence the chances of 450ca654dc3SSrivatsa S. Bhat * double invocations are very low. Moreover, there are scenarios 451ca654dc3SSrivatsa S. Bhat * where these checks can emit false-positive warnings in these 452ca654dc3SSrivatsa S. Bhat * drivers; so we avoid that by skipping them altogether. 453ca654dc3SSrivatsa S. Bhat */ 454ca654dc3SSrivatsa S. Bhat WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) 455ca654dc3SSrivatsa S. Bhat && current == policy->transition_task); 456ca654dc3SSrivatsa S. Bhat 45712478cf0SSrivatsa S. Bhat wait: 45812478cf0SSrivatsa S. Bhat wait_event(policy->transition_wait, !policy->transition_ongoing); 45912478cf0SSrivatsa S. Bhat 46012478cf0SSrivatsa S. Bhat spin_lock(&policy->transition_lock); 46112478cf0SSrivatsa S. Bhat 46212478cf0SSrivatsa S. Bhat if (unlikely(policy->transition_ongoing)) { 46312478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 46412478cf0SSrivatsa S. Bhat goto wait; 46512478cf0SSrivatsa S. Bhat } 46612478cf0SSrivatsa S. Bhat 46712478cf0SSrivatsa S. Bhat policy->transition_ongoing = true; 468ca654dc3SSrivatsa S. Bhat policy->transition_task = current; 46912478cf0SSrivatsa S. Bhat 47012478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 47112478cf0SSrivatsa S. Bhat 47212478cf0SSrivatsa S. Bhat cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 47312478cf0SSrivatsa S. Bhat } 47412478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); 47512478cf0SSrivatsa S. Bhat 47612478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy, 47712478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs, int transition_failed) 47812478cf0SSrivatsa S. Bhat { 47912478cf0SSrivatsa S. Bhat if (unlikely(WARN_ON(!policy->transition_ongoing))) 48012478cf0SSrivatsa S. Bhat return; 48112478cf0SSrivatsa S. Bhat 48212478cf0SSrivatsa S. Bhat cpufreq_notify_post_transition(policy, freqs, transition_failed); 48312478cf0SSrivatsa S. Bhat 48412478cf0SSrivatsa S. Bhat policy->transition_ongoing = false; 485ca654dc3SSrivatsa S. Bhat policy->transition_task = NULL; 48612478cf0SSrivatsa S. Bhat 48712478cf0SSrivatsa S. Bhat wake_up(&policy->transition_wait); 48812478cf0SSrivatsa S. Bhat } 48912478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); 49012478cf0SSrivatsa S. Bhat 4911da177e4SLinus Torvalds 4921da177e4SLinus Torvalds /********************************************************************* 4931da177e4SLinus Torvalds * SYSFS INTERFACE * 4941da177e4SLinus Torvalds *********************************************************************/ 4958a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj, 4966f19efc0SLukasz Majewski struct attribute *attr, char *buf) 4976f19efc0SLukasz Majewski { 4986f19efc0SLukasz Majewski return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); 4996f19efc0SLukasz Majewski } 5006f19efc0SLukasz Majewski 5016f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, 5026f19efc0SLukasz Majewski const char *buf, size_t count) 5036f19efc0SLukasz Majewski { 5046f19efc0SLukasz Majewski int ret, enable; 5056f19efc0SLukasz Majewski 5066f19efc0SLukasz Majewski ret = sscanf(buf, "%d", &enable); 5076f19efc0SLukasz Majewski if (ret != 1 || enable < 0 || enable > 1) 5086f19efc0SLukasz Majewski return -EINVAL; 5096f19efc0SLukasz Majewski 5106f19efc0SLukasz Majewski if (cpufreq_boost_trigger_state(enable)) { 511e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST!\n", 512e837f9b5SJoe Perches __func__, enable ? "enable" : "disable"); 5136f19efc0SLukasz Majewski return -EINVAL; 5146f19efc0SLukasz Majewski } 5156f19efc0SLukasz Majewski 516e837f9b5SJoe Perches pr_debug("%s: cpufreq BOOST %s\n", 517e837f9b5SJoe Perches __func__, enable ? "enabled" : "disabled"); 5186f19efc0SLukasz Majewski 5196f19efc0SLukasz Majewski return count; 5206f19efc0SLukasz Majewski } 5216f19efc0SLukasz Majewski define_one_global_rw(boost); 5221da177e4SLinus Torvalds 52342f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor) 5243bcb09a3SJeremy Fitzhardinge { 5253bcb09a3SJeremy Fitzhardinge struct cpufreq_governor *t; 5263bcb09a3SJeremy Fitzhardinge 527f7b27061SViresh Kumar for_each_governor(t) 5287c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 5293bcb09a3SJeremy Fitzhardinge return t; 5303bcb09a3SJeremy Fitzhardinge 5313bcb09a3SJeremy Fitzhardinge return NULL; 5323bcb09a3SJeremy Fitzhardinge } 5333bcb09a3SJeremy Fitzhardinge 5341da177e4SLinus Torvalds /** 5351da177e4SLinus Torvalds * cpufreq_parse_governor - parse a governor string 5361da177e4SLinus Torvalds */ 5371da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, 5381da177e4SLinus Torvalds struct cpufreq_governor **governor) 5391da177e4SLinus Torvalds { 5403bcb09a3SJeremy Fitzhardinge int err = -EINVAL; 5413bcb09a3SJeremy Fitzhardinge 5421c3d85ddSRafael J. Wysocki if (!cpufreq_driver) 5433bcb09a3SJeremy Fitzhardinge goto out; 5443bcb09a3SJeremy Fitzhardinge 5451c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 5467c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 5471da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_PERFORMANCE; 5483bcb09a3SJeremy Fitzhardinge err = 0; 5497c4f4539SRasmus Villemoes } else if (!strncasecmp(str_governor, "powersave", 550e08f5f5bSGautham R Shenoy CPUFREQ_NAME_LEN)) { 5511da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_POWERSAVE; 5523bcb09a3SJeremy Fitzhardinge err = 0; 5531da177e4SLinus Torvalds } 5542e1cc3a5SViresh Kumar } else { 5551da177e4SLinus Torvalds struct cpufreq_governor *t; 5563bcb09a3SJeremy Fitzhardinge 5573fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 5583bcb09a3SJeremy Fitzhardinge 55942f91fa1SViresh Kumar t = find_governor(str_governor); 5603bcb09a3SJeremy Fitzhardinge 561ea714970SJeremy Fitzhardinge if (t == NULL) { 562ea714970SJeremy Fitzhardinge int ret; 563ea714970SJeremy Fitzhardinge 564ea714970SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5651a8e1463SKees Cook ret = request_module("cpufreq_%s", str_governor); 566ea714970SJeremy Fitzhardinge mutex_lock(&cpufreq_governor_mutex); 567ea714970SJeremy Fitzhardinge 568ea714970SJeremy Fitzhardinge if (ret == 0) 56942f91fa1SViresh Kumar t = find_governor(str_governor); 570ea714970SJeremy Fitzhardinge } 571ea714970SJeremy Fitzhardinge 5723bcb09a3SJeremy Fitzhardinge if (t != NULL) { 5731da177e4SLinus Torvalds *governor = t; 5743bcb09a3SJeremy Fitzhardinge err = 0; 5751da177e4SLinus Torvalds } 5763bcb09a3SJeremy Fitzhardinge 5773bcb09a3SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5781da177e4SLinus Torvalds } 5791da177e4SLinus Torvalds out: 5803bcb09a3SJeremy Fitzhardinge return err; 5811da177e4SLinus Torvalds } 5821da177e4SLinus Torvalds 5831da177e4SLinus Torvalds /** 584e08f5f5bSGautham R Shenoy * cpufreq_per_cpu_attr_read() / show_##file_name() - 585e08f5f5bSGautham R Shenoy * print out cpufreq information 5861da177e4SLinus Torvalds * 5871da177e4SLinus Torvalds * Write out information from cpufreq_driver->policy[cpu]; object must be 5881da177e4SLinus Torvalds * "unsigned int". 5891da177e4SLinus Torvalds */ 5901da177e4SLinus Torvalds 5911da177e4SLinus Torvalds #define show_one(file_name, object) \ 5921da177e4SLinus Torvalds static ssize_t show_##file_name \ 5931da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf) \ 5941da177e4SLinus Torvalds { \ 5951da177e4SLinus Torvalds return sprintf(buf, "%u\n", policy->object); \ 5961da177e4SLinus Torvalds } 5971da177e4SLinus Torvalds 5981da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq); 5991da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq); 600ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 6011da177e4SLinus Torvalds show_one(scaling_min_freq, min); 6021da177e4SLinus Torvalds show_one(scaling_max_freq, max); 603c034b02eSDirk Brandewie 60409347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) 605c034b02eSDirk Brandewie { 606c034b02eSDirk Brandewie ssize_t ret; 607c034b02eSDirk Brandewie 608c034b02eSDirk Brandewie if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 609c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); 610c034b02eSDirk Brandewie else 611c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", policy->cur); 612c034b02eSDirk Brandewie return ret; 613c034b02eSDirk Brandewie } 6141da177e4SLinus Torvalds 615037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 6163a3e9e06SViresh Kumar struct cpufreq_policy *new_policy); 6177970e08bSThomas Renninger 6181da177e4SLinus Torvalds /** 6191da177e4SLinus Torvalds * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 6201da177e4SLinus Torvalds */ 6211da177e4SLinus Torvalds #define store_one(file_name, object) \ 6221da177e4SLinus Torvalds static ssize_t store_##file_name \ 6231da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count) \ 6241da177e4SLinus Torvalds { \ 625619c144cSVince Hsu int ret, temp; \ 6261da177e4SLinus Torvalds struct cpufreq_policy new_policy; \ 6271da177e4SLinus Torvalds \ 6281da177e4SLinus Torvalds ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 6291da177e4SLinus Torvalds if (ret) \ 6301da177e4SLinus Torvalds return -EINVAL; \ 6311da177e4SLinus Torvalds \ 6321da177e4SLinus Torvalds ret = sscanf(buf, "%u", &new_policy.object); \ 6331da177e4SLinus Torvalds if (ret != 1) \ 6341da177e4SLinus Torvalds return -EINVAL; \ 6351da177e4SLinus Torvalds \ 636619c144cSVince Hsu temp = new_policy.object; \ 637037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); \ 638619c144cSVince Hsu if (!ret) \ 639619c144cSVince Hsu policy->user_policy.object = temp; \ 6401da177e4SLinus Torvalds \ 6411da177e4SLinus Torvalds return ret ? ret : count; \ 6421da177e4SLinus Torvalds } 6431da177e4SLinus Torvalds 6441da177e4SLinus Torvalds store_one(scaling_min_freq, min); 6451da177e4SLinus Torvalds store_one(scaling_max_freq, max); 6461da177e4SLinus Torvalds 6471da177e4SLinus Torvalds /** 6481da177e4SLinus Torvalds * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 6491da177e4SLinus Torvalds */ 650e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 651e08f5f5bSGautham R Shenoy char *buf) 6521da177e4SLinus Torvalds { 653d92d50a4SViresh Kumar unsigned int cur_freq = __cpufreq_get(policy); 6541da177e4SLinus Torvalds if (!cur_freq) 6551da177e4SLinus Torvalds return sprintf(buf, "<unknown>"); 6561da177e4SLinus Torvalds return sprintf(buf, "%u\n", cur_freq); 6571da177e4SLinus Torvalds } 6581da177e4SLinus Torvalds 6591da177e4SLinus Torvalds /** 6601da177e4SLinus Torvalds * show_scaling_governor - show the current policy for the specified CPU 6611da177e4SLinus Torvalds */ 662905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) 6631da177e4SLinus Torvalds { 6641da177e4SLinus Torvalds if (policy->policy == CPUFREQ_POLICY_POWERSAVE) 6651da177e4SLinus Torvalds return sprintf(buf, "powersave\n"); 6661da177e4SLinus Torvalds else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) 6671da177e4SLinus Torvalds return sprintf(buf, "performance\n"); 6681da177e4SLinus Torvalds else if (policy->governor) 6694b972f0bSviresh kumar return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", 67029464f28SDave Jones policy->governor->name); 6711da177e4SLinus Torvalds return -EINVAL; 6721da177e4SLinus Torvalds } 6731da177e4SLinus Torvalds 6741da177e4SLinus Torvalds /** 6751da177e4SLinus Torvalds * store_scaling_governor - store policy for the specified CPU 6761da177e4SLinus Torvalds */ 6771da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 6781da177e4SLinus Torvalds const char *buf, size_t count) 6791da177e4SLinus Torvalds { 6805136fa56SSrivatsa S. Bhat int ret; 6811da177e4SLinus Torvalds char str_governor[16]; 6821da177e4SLinus Torvalds struct cpufreq_policy new_policy; 6831da177e4SLinus Torvalds 6841da177e4SLinus Torvalds ret = cpufreq_get_policy(&new_policy, policy->cpu); 6851da177e4SLinus Torvalds if (ret) 6861da177e4SLinus Torvalds return ret; 6871da177e4SLinus Torvalds 6881da177e4SLinus Torvalds ret = sscanf(buf, "%15s", str_governor); 6891da177e4SLinus Torvalds if (ret != 1) 6901da177e4SLinus Torvalds return -EINVAL; 6911da177e4SLinus Torvalds 692e08f5f5bSGautham R Shenoy if (cpufreq_parse_governor(str_governor, &new_policy.policy, 693e08f5f5bSGautham R Shenoy &new_policy.governor)) 6941da177e4SLinus Torvalds return -EINVAL; 6951da177e4SLinus Torvalds 696037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 6977970e08bSThomas Renninger 6987970e08bSThomas Renninger policy->user_policy.policy = policy->policy; 6997970e08bSThomas Renninger policy->user_policy.governor = policy->governor; 7007970e08bSThomas Renninger 701e08f5f5bSGautham R Shenoy if (ret) 702e08f5f5bSGautham R Shenoy return ret; 703e08f5f5bSGautham R Shenoy else 704e08f5f5bSGautham R Shenoy return count; 7051da177e4SLinus Torvalds } 7061da177e4SLinus Torvalds 7071da177e4SLinus Torvalds /** 7081da177e4SLinus Torvalds * show_scaling_driver - show the cpufreq driver currently loaded 7091da177e4SLinus Torvalds */ 7101da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) 7111da177e4SLinus Torvalds { 7121c3d85ddSRafael J. Wysocki return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); 7131da177e4SLinus Torvalds } 7141da177e4SLinus Torvalds 7151da177e4SLinus Torvalds /** 7161da177e4SLinus Torvalds * show_scaling_available_governors - show the available CPUfreq governors 7171da177e4SLinus Torvalds */ 7181da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, 7191da177e4SLinus Torvalds char *buf) 7201da177e4SLinus Torvalds { 7211da177e4SLinus Torvalds ssize_t i = 0; 7221da177e4SLinus Torvalds struct cpufreq_governor *t; 7231da177e4SLinus Torvalds 7249c0ebcf7SViresh Kumar if (!has_target()) { 7251da177e4SLinus Torvalds i += sprintf(buf, "performance powersave"); 7261da177e4SLinus Torvalds goto out; 7271da177e4SLinus Torvalds } 7281da177e4SLinus Torvalds 729f7b27061SViresh Kumar for_each_governor(t) { 73029464f28SDave Jones if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 73129464f28SDave Jones - (CPUFREQ_NAME_LEN + 2))) 7321da177e4SLinus Torvalds goto out; 7334b972f0bSviresh kumar i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); 7341da177e4SLinus Torvalds } 7351da177e4SLinus Torvalds out: 7361da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7371da177e4SLinus Torvalds return i; 7381da177e4SLinus Torvalds } 739e8628dd0SDarrick J. Wong 740f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) 7411da177e4SLinus Torvalds { 7421da177e4SLinus Torvalds ssize_t i = 0; 7431da177e4SLinus Torvalds unsigned int cpu; 7441da177e4SLinus Torvalds 745835481d9SRusty Russell for_each_cpu(cpu, mask) { 7461da177e4SLinus Torvalds if (i) 7471da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 7481da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 7491da177e4SLinus Torvalds if (i >= (PAGE_SIZE - 5)) 7501da177e4SLinus Torvalds break; 7511da177e4SLinus Torvalds } 7521da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7531da177e4SLinus Torvalds return i; 7541da177e4SLinus Torvalds } 755f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus); 7561da177e4SLinus Torvalds 757e8628dd0SDarrick J. Wong /** 758e8628dd0SDarrick J. Wong * show_related_cpus - show the CPUs affected by each transition even if 759e8628dd0SDarrick J. Wong * hw coordination is in use 760e8628dd0SDarrick J. Wong */ 761e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 762e8628dd0SDarrick J. Wong { 763f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->related_cpus, buf); 764e8628dd0SDarrick J. Wong } 765e8628dd0SDarrick J. Wong 766e8628dd0SDarrick J. Wong /** 767e8628dd0SDarrick J. Wong * show_affected_cpus - show the CPUs affected by each transition 768e8628dd0SDarrick J. Wong */ 769e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) 770e8628dd0SDarrick J. Wong { 771f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->cpus, buf); 772e8628dd0SDarrick J. Wong } 773e8628dd0SDarrick J. Wong 7749e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, 7759e76988eSVenki Pallipadi const char *buf, size_t count) 7769e76988eSVenki Pallipadi { 7779e76988eSVenki Pallipadi unsigned int freq = 0; 7789e76988eSVenki Pallipadi unsigned int ret; 7799e76988eSVenki Pallipadi 780879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->store_setspeed) 7819e76988eSVenki Pallipadi return -EINVAL; 7829e76988eSVenki Pallipadi 7839e76988eSVenki Pallipadi ret = sscanf(buf, "%u", &freq); 7849e76988eSVenki Pallipadi if (ret != 1) 7859e76988eSVenki Pallipadi return -EINVAL; 7869e76988eSVenki Pallipadi 7879e76988eSVenki Pallipadi policy->governor->store_setspeed(policy, freq); 7889e76988eSVenki Pallipadi 7899e76988eSVenki Pallipadi return count; 7909e76988eSVenki Pallipadi } 7919e76988eSVenki Pallipadi 7929e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) 7939e76988eSVenki Pallipadi { 794879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->show_setspeed) 7959e76988eSVenki Pallipadi return sprintf(buf, "<unsupported>\n"); 7969e76988eSVenki Pallipadi 7979e76988eSVenki Pallipadi return policy->governor->show_setspeed(policy, buf); 7989e76988eSVenki Pallipadi } 7991da177e4SLinus Torvalds 800e2f74f35SThomas Renninger /** 8018bf1ac72Sviresh kumar * show_bios_limit - show the current cpufreq HW/BIOS limitation 802e2f74f35SThomas Renninger */ 803e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) 804e2f74f35SThomas Renninger { 805e2f74f35SThomas Renninger unsigned int limit; 806e2f74f35SThomas Renninger int ret; 8071c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 8081c3d85ddSRafael J. Wysocki ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 809e2f74f35SThomas Renninger if (!ret) 810e2f74f35SThomas Renninger return sprintf(buf, "%u\n", limit); 811e2f74f35SThomas Renninger } 812e2f74f35SThomas Renninger return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 813e2f74f35SThomas Renninger } 814e2f74f35SThomas Renninger 8156dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); 8166dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq); 8176dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq); 8186dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency); 8196dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors); 8206dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver); 8216dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq); 8226dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit); 8236dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus); 8246dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus); 8256dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq); 8266dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq); 8276dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor); 8286dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed); 8291da177e4SLinus Torvalds 8301da177e4SLinus Torvalds static struct attribute *default_attrs[] = { 8311da177e4SLinus Torvalds &cpuinfo_min_freq.attr, 8321da177e4SLinus Torvalds &cpuinfo_max_freq.attr, 833ed129784SThomas Renninger &cpuinfo_transition_latency.attr, 8341da177e4SLinus Torvalds &scaling_min_freq.attr, 8351da177e4SLinus Torvalds &scaling_max_freq.attr, 8361da177e4SLinus Torvalds &affected_cpus.attr, 837e8628dd0SDarrick J. Wong &related_cpus.attr, 8381da177e4SLinus Torvalds &scaling_governor.attr, 8391da177e4SLinus Torvalds &scaling_driver.attr, 8401da177e4SLinus Torvalds &scaling_available_governors.attr, 8419e76988eSVenki Pallipadi &scaling_setspeed.attr, 8421da177e4SLinus Torvalds NULL 8431da177e4SLinus Torvalds }; 8441da177e4SLinus Torvalds 8451da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) 8461da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr) 8471da177e4SLinus Torvalds 8481da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 8491da177e4SLinus Torvalds { 8501da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8511da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 8521b750e3bSViresh Kumar ssize_t ret; 8536eed9404SViresh Kumar 8546eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 8551b750e3bSViresh Kumar return -EINVAL; 8565a01f2e8SVenkatesh Pallipadi 857ad7722daSviresh kumar down_read(&policy->rwsem); 8585a01f2e8SVenkatesh Pallipadi 859e08f5f5bSGautham R Shenoy if (fattr->show) 860e08f5f5bSGautham R Shenoy ret = fattr->show(policy, buf); 861e08f5f5bSGautham R Shenoy else 862e08f5f5bSGautham R Shenoy ret = -EIO; 863e08f5f5bSGautham R Shenoy 864ad7722daSviresh kumar up_read(&policy->rwsem); 8656eed9404SViresh Kumar up_read(&cpufreq_rwsem); 8661b750e3bSViresh Kumar 8671da177e4SLinus Torvalds return ret; 8681da177e4SLinus Torvalds } 8691da177e4SLinus Torvalds 8701da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr, 8711da177e4SLinus Torvalds const char *buf, size_t count) 8721da177e4SLinus Torvalds { 8731da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8741da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 875a07530b4SDave Jones ssize_t ret = -EINVAL; 8766eed9404SViresh Kumar 8774f750c93SSrivatsa S. Bhat get_online_cpus(); 8784f750c93SSrivatsa S. Bhat 8794f750c93SSrivatsa S. Bhat if (!cpu_online(policy->cpu)) 8804f750c93SSrivatsa S. Bhat goto unlock; 8814f750c93SSrivatsa S. Bhat 8826eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 8834f750c93SSrivatsa S. Bhat goto unlock; 8845a01f2e8SVenkatesh Pallipadi 885ad7722daSviresh kumar down_write(&policy->rwsem); 8865a01f2e8SVenkatesh Pallipadi 88711e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 88811e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) { 88911e584cfSViresh Kumar ret = -EBUSY; 89011e584cfSViresh Kumar goto unlock_policy_rwsem; 89111e584cfSViresh Kumar } 89211e584cfSViresh Kumar 893e08f5f5bSGautham R Shenoy if (fattr->store) 894e08f5f5bSGautham R Shenoy ret = fattr->store(policy, buf, count); 895e08f5f5bSGautham R Shenoy else 896e08f5f5bSGautham R Shenoy ret = -EIO; 897e08f5f5bSGautham R Shenoy 89811e584cfSViresh Kumar unlock_policy_rwsem: 899ad7722daSviresh kumar up_write(&policy->rwsem); 9006eed9404SViresh Kumar 9016eed9404SViresh Kumar up_read(&cpufreq_rwsem); 9024f750c93SSrivatsa S. Bhat unlock: 9034f750c93SSrivatsa S. Bhat put_online_cpus(); 9044f750c93SSrivatsa S. Bhat 9051da177e4SLinus Torvalds return ret; 9061da177e4SLinus Torvalds } 9071da177e4SLinus Torvalds 9081da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj) 9091da177e4SLinus Torvalds { 9101da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 9112d06d8c4SDominik Brodowski pr_debug("last reference is dropped\n"); 9121da177e4SLinus Torvalds complete(&policy->kobj_unregister); 9131da177e4SLinus Torvalds } 9141da177e4SLinus Torvalds 91552cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = { 9161da177e4SLinus Torvalds .show = show, 9171da177e4SLinus Torvalds .store = store, 9181da177e4SLinus Torvalds }; 9191da177e4SLinus Torvalds 9201da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = { 9211da177e4SLinus Torvalds .sysfs_ops = &sysfs_ops, 9221da177e4SLinus Torvalds .default_attrs = default_attrs, 9231da177e4SLinus Torvalds .release = cpufreq_sysfs_release, 9241da177e4SLinus Torvalds }; 9251da177e4SLinus Torvalds 9262361be23SViresh Kumar struct kobject *cpufreq_global_kobject; 9272361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject); 9282361be23SViresh Kumar 9292361be23SViresh Kumar static int cpufreq_global_kobject_usage; 9302361be23SViresh Kumar 9312361be23SViresh Kumar int cpufreq_get_global_kobject(void) 9322361be23SViresh Kumar { 9332361be23SViresh Kumar if (!cpufreq_global_kobject_usage++) 9342361be23SViresh Kumar return kobject_add(cpufreq_global_kobject, 9352361be23SViresh Kumar &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); 9362361be23SViresh Kumar 9372361be23SViresh Kumar return 0; 9382361be23SViresh Kumar } 9392361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject); 9402361be23SViresh Kumar 9412361be23SViresh Kumar void cpufreq_put_global_kobject(void) 9422361be23SViresh Kumar { 9432361be23SViresh Kumar if (!--cpufreq_global_kobject_usage) 9442361be23SViresh Kumar kobject_del(cpufreq_global_kobject); 9452361be23SViresh Kumar } 9462361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject); 9472361be23SViresh Kumar 9482361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr) 9492361be23SViresh Kumar { 9502361be23SViresh Kumar int ret = cpufreq_get_global_kobject(); 9512361be23SViresh Kumar 9522361be23SViresh Kumar if (!ret) { 9532361be23SViresh Kumar ret = sysfs_create_file(cpufreq_global_kobject, attr); 9542361be23SViresh Kumar if (ret) 9552361be23SViresh Kumar cpufreq_put_global_kobject(); 9562361be23SViresh Kumar } 9572361be23SViresh Kumar 9582361be23SViresh Kumar return ret; 9592361be23SViresh Kumar } 9602361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file); 9612361be23SViresh Kumar 9622361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr) 9632361be23SViresh Kumar { 9642361be23SViresh Kumar sysfs_remove_file(cpufreq_global_kobject, attr); 9652361be23SViresh Kumar cpufreq_put_global_kobject(); 9662361be23SViresh Kumar } 9672361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file); 9682361be23SViresh Kumar 96987549141SViresh Kumar static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) 97087549141SViresh Kumar { 97187549141SViresh Kumar struct device *cpu_dev; 97287549141SViresh Kumar 97387549141SViresh Kumar pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu); 97487549141SViresh Kumar 97587549141SViresh Kumar if (!policy) 97687549141SViresh Kumar return 0; 97787549141SViresh Kumar 97887549141SViresh Kumar cpu_dev = get_cpu_device(cpu); 97987549141SViresh Kumar if (WARN_ON(!cpu_dev)) 98087549141SViresh Kumar return 0; 98187549141SViresh Kumar 98287549141SViresh Kumar return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); 98387549141SViresh Kumar } 98487549141SViresh Kumar 98587549141SViresh Kumar static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) 98687549141SViresh Kumar { 98787549141SViresh Kumar struct device *cpu_dev; 98887549141SViresh Kumar 98987549141SViresh Kumar pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu); 99087549141SViresh Kumar 99187549141SViresh Kumar cpu_dev = get_cpu_device(cpu); 99287549141SViresh Kumar if (WARN_ON(!cpu_dev)) 99387549141SViresh Kumar return; 99487549141SViresh Kumar 99587549141SViresh Kumar sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 99687549141SViresh Kumar } 99787549141SViresh Kumar 99887549141SViresh Kumar /* Add/remove symlinks for all related CPUs */ 999308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) 100019d6f7ecSDave Jones { 100119d6f7ecSDave Jones unsigned int j; 100219d6f7ecSDave Jones int ret = 0; 100319d6f7ecSDave Jones 100487549141SViresh Kumar /* Some related CPUs might not be present (physically hotplugged) */ 1005*559ed407SRafael J. Wysocki for_each_cpu(j, policy->real_cpus) { 10069d16f207SSaravana Kannan if (j == policy->kobj_cpu) 100719d6f7ecSDave Jones continue; 100819d6f7ecSDave Jones 100987549141SViresh Kumar ret = add_cpu_dev_symlink(policy, j); 101071c3461eSRafael J. Wysocki if (ret) 101171c3461eSRafael J. Wysocki break; 101219d6f7ecSDave Jones } 101387549141SViresh Kumar 101419d6f7ecSDave Jones return ret; 101519d6f7ecSDave Jones } 101619d6f7ecSDave Jones 101787549141SViresh Kumar static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) 101887549141SViresh Kumar { 101987549141SViresh Kumar unsigned int j; 102087549141SViresh Kumar 102187549141SViresh Kumar /* Some related CPUs might not be present (physically hotplugged) */ 1022*559ed407SRafael J. Wysocki for_each_cpu(j, policy->real_cpus) { 102387549141SViresh Kumar if (j == policy->kobj_cpu) 102487549141SViresh Kumar continue; 102587549141SViresh Kumar 102687549141SViresh Kumar remove_cpu_dev_symlink(policy, j); 102787549141SViresh Kumar } 102887549141SViresh Kumar } 102987549141SViresh Kumar 1030308b60e7SViresh Kumar static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, 10318a25a2fdSKay Sievers struct device *dev) 1032909a694eSDave Jones { 1033909a694eSDave Jones struct freq_attr **drv_attr; 1034909a694eSDave Jones int ret = 0; 1035909a694eSDave Jones 1036909a694eSDave Jones /* set up files for this cpu device */ 10371c3d85ddSRafael J. Wysocki drv_attr = cpufreq_driver->attr; 1038f13f1184SViresh Kumar while (drv_attr && *drv_attr) { 1039909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 1040909a694eSDave Jones if (ret) 10416d4e81edSTomeu Vizoso return ret; 1042909a694eSDave Jones drv_attr++; 1043909a694eSDave Jones } 10441c3d85ddSRafael J. Wysocki if (cpufreq_driver->get) { 1045909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 1046909a694eSDave Jones if (ret) 10476d4e81edSTomeu Vizoso return ret; 1048909a694eSDave Jones } 1049c034b02eSDirk Brandewie 1050909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 1051909a694eSDave Jones if (ret) 10526d4e81edSTomeu Vizoso return ret; 1053c034b02eSDirk Brandewie 10541c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 1055e2f74f35SThomas Renninger ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 1056e2f74f35SThomas Renninger if (ret) 10576d4e81edSTomeu Vizoso return ret; 1058e2f74f35SThomas Renninger } 1059909a694eSDave Jones 10606d4e81edSTomeu Vizoso return cpufreq_add_dev_symlink(policy); 1061e18f1682SSrivatsa S. Bhat } 1062e18f1682SSrivatsa S. Bhat 1063e18f1682SSrivatsa S. Bhat static void cpufreq_init_policy(struct cpufreq_policy *policy) 1064e18f1682SSrivatsa S. Bhat { 10656e2c89d1Sviresh kumar struct cpufreq_governor *gov = NULL; 1066e18f1682SSrivatsa S. Bhat struct cpufreq_policy new_policy; 1067e18f1682SSrivatsa S. Bhat int ret = 0; 1068e18f1682SSrivatsa S. Bhat 1069d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 1070a27a9ab7SJason Baron 10716e2c89d1Sviresh kumar /* Update governor of new_policy to the governor used before hotplug */ 10724573237bSViresh Kumar gov = find_governor(policy->last_governor); 10736e2c89d1Sviresh kumar if (gov) 10746e2c89d1Sviresh kumar pr_debug("Restoring governor %s for cpu %d\n", 10756e2c89d1Sviresh kumar policy->governor->name, policy->cpu); 10766e2c89d1Sviresh kumar else 10776e2c89d1Sviresh kumar gov = CPUFREQ_DEFAULT_GOVERNOR; 10786e2c89d1Sviresh kumar 10796e2c89d1Sviresh kumar new_policy.governor = gov; 10806e2c89d1Sviresh kumar 1081a27a9ab7SJason Baron /* Use the default policy if its valid. */ 1082a27a9ab7SJason Baron if (cpufreq_driver->setpolicy) 10836e2c89d1Sviresh kumar cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 1084ecf7e461SDave Jones 1085ecf7e461SDave Jones /* set default policy */ 1086037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 1087ecf7e461SDave Jones if (ret) { 10882d06d8c4SDominik Brodowski pr_debug("setting policy failed\n"); 10891c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 10901c3d85ddSRafael J. Wysocki cpufreq_driver->exit(policy); 1091ecf7e461SDave Jones } 1092909a694eSDave Jones } 1093909a694eSDave Jones 1094d8d3b471SViresh Kumar static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 109542f921a6SViresh Kumar unsigned int cpu, struct device *dev) 1096fcf80582SViresh Kumar { 10979c0ebcf7SViresh Kumar int ret = 0; 1098fcf80582SViresh Kumar 1099bb29ae15SViresh Kumar /* Has this CPU been taken care of already? */ 1100bb29ae15SViresh Kumar if (cpumask_test_cpu(cpu, policy->cpus)) 1101bb29ae15SViresh Kumar return 0; 1102bb29ae15SViresh Kumar 11039c0ebcf7SViresh Kumar if (has_target()) { 11043de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 11053de9bdebSViresh Kumar if (ret) { 11063de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 11073de9bdebSViresh Kumar return ret; 11083de9bdebSViresh Kumar } 11093de9bdebSViresh Kumar } 1110fcf80582SViresh Kumar 1111ad7722daSviresh kumar down_write(&policy->rwsem); 1112fcf80582SViresh Kumar cpumask_set_cpu(cpu, policy->cpus); 1113ad7722daSviresh kumar up_write(&policy->rwsem); 11142eaa3e2dSViresh Kumar 11159c0ebcf7SViresh Kumar if (has_target()) { 1116e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1117e5c87b76SStratos Karafotis if (!ret) 1118e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1119e5c87b76SStratos Karafotis 1120e5c87b76SStratos Karafotis if (ret) { 11213de9bdebSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 11223de9bdebSViresh Kumar return ret; 11233de9bdebSViresh Kumar } 1124820c6ca2SViresh Kumar } 1125fcf80582SViresh Kumar 112687549141SViresh Kumar return 0; 1127fcf80582SViresh Kumar } 11281da177e4SLinus Torvalds 11298414809cSSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) 11308414809cSSrivatsa S. Bhat { 11318414809cSSrivatsa S. Bhat struct cpufreq_policy *policy; 11328414809cSSrivatsa S. Bhat unsigned long flags; 11338414809cSSrivatsa S. Bhat 113444871c9cSLan Tianyu read_lock_irqsave(&cpufreq_driver_lock, flags); 11353914d379SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 113644871c9cSLan Tianyu read_unlock_irqrestore(&cpufreq_driver_lock, flags); 11378414809cSSrivatsa S. Bhat 11383914d379SViresh Kumar if (likely(policy)) { 11393914d379SViresh Kumar /* Policy should be inactive here */ 11403914d379SViresh Kumar WARN_ON(!policy_is_inactive(policy)); 114137829029SViresh Kumar 114237829029SViresh Kumar down_write(&policy->rwsem); 114337829029SViresh Kumar policy->cpu = cpu; 114435afd02eSViresh Kumar policy->governor = NULL; 114537829029SViresh Kumar up_write(&policy->rwsem); 11463914d379SViresh Kumar } 11476e2c89d1Sviresh kumar 11488414809cSSrivatsa S. Bhat return policy; 11498414809cSSrivatsa S. Bhat } 11508414809cSSrivatsa S. Bhat 11512fc3384dSViresh Kumar static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) 1152e9698cc5SSrivatsa S. Bhat { 1153e9698cc5SSrivatsa S. Bhat struct cpufreq_policy *policy; 11542fc3384dSViresh Kumar int ret; 1155e9698cc5SSrivatsa S. Bhat 1156e9698cc5SSrivatsa S. Bhat policy = kzalloc(sizeof(*policy), GFP_KERNEL); 1157e9698cc5SSrivatsa S. Bhat if (!policy) 1158e9698cc5SSrivatsa S. Bhat return NULL; 1159e9698cc5SSrivatsa S. Bhat 1160e9698cc5SSrivatsa S. Bhat if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) 1161e9698cc5SSrivatsa S. Bhat goto err_free_policy; 1162e9698cc5SSrivatsa S. Bhat 1163e9698cc5SSrivatsa S. Bhat if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1164e9698cc5SSrivatsa S. Bhat goto err_free_cpumask; 1165e9698cc5SSrivatsa S. Bhat 1166*559ed407SRafael J. Wysocki if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) 1167*559ed407SRafael J. Wysocki goto err_free_rcpumask; 1168*559ed407SRafael J. Wysocki 11692fc3384dSViresh Kumar ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 11702fc3384dSViresh Kumar "cpufreq"); 11712fc3384dSViresh Kumar if (ret) { 11722fc3384dSViresh Kumar pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1173*559ed407SRafael J. Wysocki goto err_free_real_cpus; 11742fc3384dSViresh Kumar } 11752fc3384dSViresh Kumar 1176c88a1f8bSLukasz Majewski INIT_LIST_HEAD(&policy->policy_list); 1177ad7722daSviresh kumar init_rwsem(&policy->rwsem); 117812478cf0SSrivatsa S. Bhat spin_lock_init(&policy->transition_lock); 117912478cf0SSrivatsa S. Bhat init_waitqueue_head(&policy->transition_wait); 1180818c5712SViresh Kumar init_completion(&policy->kobj_unregister); 1181818c5712SViresh Kumar INIT_WORK(&policy->update, handle_update); 1182ad7722daSviresh kumar 11832fc3384dSViresh Kumar policy->cpu = dev->id; 118487549141SViresh Kumar 118587549141SViresh Kumar /* Set this once on allocation */ 11862fc3384dSViresh Kumar policy->kobj_cpu = dev->id; 118787549141SViresh Kumar 1188e9698cc5SSrivatsa S. Bhat return policy; 1189e9698cc5SSrivatsa S. Bhat 1190*559ed407SRafael J. Wysocki err_free_real_cpus: 1191*559ed407SRafael J. Wysocki free_cpumask_var(policy->real_cpus); 11922fc3384dSViresh Kumar err_free_rcpumask: 11932fc3384dSViresh Kumar free_cpumask_var(policy->related_cpus); 1194e9698cc5SSrivatsa S. Bhat err_free_cpumask: 1195e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1196e9698cc5SSrivatsa S. Bhat err_free_policy: 1197e9698cc5SSrivatsa S. Bhat kfree(policy); 1198e9698cc5SSrivatsa S. Bhat 1199e9698cc5SSrivatsa S. Bhat return NULL; 1200e9698cc5SSrivatsa S. Bhat } 1201e9698cc5SSrivatsa S. Bhat 12022fc3384dSViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) 120342f921a6SViresh Kumar { 120442f921a6SViresh Kumar struct kobject *kobj; 120542f921a6SViresh Kumar struct completion *cmp; 120642f921a6SViresh Kumar 12072fc3384dSViresh Kumar if (notify) 1208fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1209fcd7af91SViresh Kumar CPUFREQ_REMOVE_POLICY, policy); 1210fcd7af91SViresh Kumar 121187549141SViresh Kumar down_write(&policy->rwsem); 121287549141SViresh Kumar cpufreq_remove_dev_symlink(policy); 121342f921a6SViresh Kumar kobj = &policy->kobj; 121442f921a6SViresh Kumar cmp = &policy->kobj_unregister; 121587549141SViresh Kumar up_write(&policy->rwsem); 121642f921a6SViresh Kumar kobject_put(kobj); 121742f921a6SViresh Kumar 121842f921a6SViresh Kumar /* 121942f921a6SViresh Kumar * We need to make sure that the underlying kobj is 122042f921a6SViresh Kumar * actually not referenced anymore by anybody before we 122142f921a6SViresh Kumar * proceed with unloading. 122242f921a6SViresh Kumar */ 122342f921a6SViresh Kumar pr_debug("waiting for dropping of refcount\n"); 122442f921a6SViresh Kumar wait_for_completion(cmp); 122542f921a6SViresh Kumar pr_debug("wait complete\n"); 122642f921a6SViresh Kumar } 122742f921a6SViresh Kumar 12283654c5ccSViresh Kumar static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) 1229e9698cc5SSrivatsa S. Bhat { 1230988bed09SViresh Kumar unsigned long flags; 1231988bed09SViresh Kumar int cpu; 1232988bed09SViresh Kumar 1233988bed09SViresh Kumar /* Remove policy from list */ 1234988bed09SViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1235988bed09SViresh Kumar list_del(&policy->policy_list); 1236988bed09SViresh Kumar 1237988bed09SViresh Kumar for_each_cpu(cpu, policy->related_cpus) 1238988bed09SViresh Kumar per_cpu(cpufreq_cpu_data, cpu) = NULL; 1239988bed09SViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1240988bed09SViresh Kumar 12413654c5ccSViresh Kumar cpufreq_policy_put_kobj(policy, notify); 1242*559ed407SRafael J. Wysocki free_cpumask_var(policy->real_cpus); 1243e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->related_cpus); 1244e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1245e9698cc5SSrivatsa S. Bhat kfree(policy); 1246e9698cc5SSrivatsa S. Bhat } 1247e9698cc5SSrivatsa S. Bhat 124823faf0b7SViresh Kumar /** 124923faf0b7SViresh Kumar * cpufreq_add_dev - add a CPU device 125023faf0b7SViresh Kumar * 125123faf0b7SViresh Kumar * Adds the cpufreq interface for a CPU device. 125223faf0b7SViresh Kumar * 125323faf0b7SViresh Kumar * The Oracle says: try running cpufreq registration/unregistration concurrently 125423faf0b7SViresh Kumar * with with cpu hotplugging and all hell will break loose. Tried to clean this 125523faf0b7SViresh Kumar * mess up, but more thorough testing is needed. - Mathieu 125623faf0b7SViresh Kumar */ 125723faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 12581da177e4SLinus Torvalds { 1259fcf80582SViresh Kumar unsigned int j, cpu = dev->id; 126065922465SViresh Kumar int ret = -ENOMEM; 12617f0c020aSViresh Kumar struct cpufreq_policy *policy; 12621da177e4SLinus Torvalds unsigned long flags; 126387549141SViresh Kumar bool recover_policy = !sif; 1264c32b6b8eSAshok Raj 12652d06d8c4SDominik Brodowski pr_debug("adding CPU %u\n", cpu); 12661da177e4SLinus Torvalds 1267*559ed407SRafael J. Wysocki if (cpu_is_offline(cpu)) { 126887549141SViresh Kumar /* 1269*559ed407SRafael J. Wysocki * Only possible if we are here from the subsys_interface add 1270*559ed407SRafael J. Wysocki * callback. A hotplug notifier will follow and we will handle 1271*559ed407SRafael J. Wysocki * it as CPU online then. For now, just create the sysfs link, 1272*559ed407SRafael J. Wysocki * unless there is no policy or the link is already present. 127387549141SViresh Kumar */ 1274*559ed407SRafael J. Wysocki policy = per_cpu(cpufreq_cpu_data, cpu); 1275*559ed407SRafael J. Wysocki return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) 1276*559ed407SRafael J. Wysocki ? add_cpu_dev_symlink(policy, cpu) : 0; 1277*559ed407SRafael J. Wysocki } 127887549141SViresh Kumar 12796eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 12806eed9404SViresh Kumar return 0; 12816eed9404SViresh Kumar 1282bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */ 12839104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 12849104bb26SViresh Kumar if (policy && !policy_is_inactive(policy)) { 12859104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 12867f0c020aSViresh Kumar ret = cpufreq_add_policy_cpu(policy, cpu, dev); 12876eed9404SViresh Kumar up_read(&cpufreq_rwsem); 12886eed9404SViresh Kumar return ret; 1289fcf80582SViresh Kumar } 12901da177e4SLinus Torvalds 129172368d12SRafael J. Wysocki /* 129272368d12SRafael J. Wysocki * Restore the saved policy when doing light-weight init and fall back 129372368d12SRafael J. Wysocki * to the full init if that fails. 129472368d12SRafael J. Wysocki */ 129596bbbe4aSViresh Kumar policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; 129672368d12SRafael J. Wysocki if (!policy) { 129796bbbe4aSViresh Kumar recover_policy = false; 12982fc3384dSViresh Kumar policy = cpufreq_policy_alloc(dev); 1299059019a3SDave Jones if (!policy) 13001da177e4SLinus Torvalds goto nomem_out; 130172368d12SRafael J. Wysocki } 13020d66b91eSSrivatsa S. Bhat 1303835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 13041da177e4SLinus Torvalds 13051da177e4SLinus Torvalds /* call driver. From then on the cpufreq must be able 13061da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 13071da177e4SLinus Torvalds */ 13081c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy); 13091da177e4SLinus Torvalds if (ret) { 13102d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 13112eaa3e2dSViresh Kumar goto err_set_policy_cpu; 13121da177e4SLinus Torvalds } 1313643ae6e8SViresh Kumar 13146d4e81edSTomeu Vizoso down_write(&policy->rwsem); 13156d4e81edSTomeu Vizoso 13165a7e56a5SViresh Kumar /* related cpus should atleast have policy->cpus */ 13175a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 13185a7e56a5SViresh Kumar 1319*559ed407SRafael J. Wysocki /* Remember which CPUs have been present at the policy creation time. */ 1320*559ed407SRafael J. Wysocki if (!recover_policy) 1321*559ed407SRafael J. Wysocki cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); 1322*559ed407SRafael J. Wysocki 13235a7e56a5SViresh Kumar /* 13245a7e56a5SViresh Kumar * affected cpus must always be the one, which are online. We aren't 13255a7e56a5SViresh Kumar * managing offline cpus here. 13265a7e56a5SViresh Kumar */ 13275a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 13285a7e56a5SViresh Kumar 132996bbbe4aSViresh Kumar if (!recover_policy) { 13305a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 13315a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 13326d4e81edSTomeu Vizoso 1333652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1334988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus) 1335652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1336652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1337988bed09SViresh Kumar } 1338652ed95dSViresh Kumar 13392ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1340da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1341da60ce9fSViresh Kumar if (!policy->cur) { 1342da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 1343da60ce9fSViresh Kumar goto err_get_freq; 1344da60ce9fSViresh Kumar } 1345da60ce9fSViresh Kumar } 1346da60ce9fSViresh Kumar 1347d3916691SViresh Kumar /* 1348d3916691SViresh Kumar * Sometimes boot loaders set CPU frequency to a value outside of 1349d3916691SViresh Kumar * frequency table present with cpufreq core. In such cases CPU might be 1350d3916691SViresh Kumar * unstable if it has to run on that frequency for long duration of time 1351d3916691SViresh Kumar * and so its better to set it to a frequency which is specified in 1352d3916691SViresh Kumar * freq-table. This also makes cpufreq stats inconsistent as 1353d3916691SViresh Kumar * cpufreq-stats would fail to register because current frequency of CPU 1354d3916691SViresh Kumar * isn't found in freq-table. 1355d3916691SViresh Kumar * 1356d3916691SViresh Kumar * Because we don't want this change to effect boot process badly, we go 1357d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1358d3916691SViresh Kumar * otherwise we will end up setting freq to lowest of the table as 'cur' 1359d3916691SViresh Kumar * is initialized to zero). 1360d3916691SViresh Kumar * 1361d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1" otherwise 1362d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be 1363d3916691SViresh Kumar * equal to target-freq. 1364d3916691SViresh Kumar */ 1365d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1366d3916691SViresh Kumar && has_target()) { 1367d3916691SViresh Kumar /* Are we running at unknown frequency ? */ 1368d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1369d3916691SViresh Kumar if (ret == -EINVAL) { 1370d3916691SViresh Kumar /* Warn user and fix it */ 1371d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", 1372d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1373d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1374d3916691SViresh Kumar CPUFREQ_RELATION_L); 1375d3916691SViresh Kumar 1376d3916691SViresh Kumar /* 1377d3916691SViresh Kumar * Reaching here after boot in a few seconds may not 1378d3916691SViresh Kumar * mean that system will remain stable at "unknown" 1379d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON(). 1380d3916691SViresh Kumar */ 1381d3916691SViresh Kumar BUG_ON(ret); 1382d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", 1383d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1384d3916691SViresh Kumar } 1385d3916691SViresh Kumar } 1386d3916691SViresh Kumar 1387a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1388a1531acdSThomas Renninger CPUFREQ_START, policy); 1389a1531acdSThomas Renninger 139096bbbe4aSViresh Kumar if (!recover_policy) { 1391308b60e7SViresh Kumar ret = cpufreq_add_dev_interface(policy, dev); 139219d6f7ecSDave Jones if (ret) 13930142f9dcSAhmed S. Darwish goto err_out_unregister; 1394fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1395fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 1396c88a1f8bSLukasz Majewski 1397c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1398c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1399c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1400988bed09SViresh Kumar } 14018ff69732SDave Jones 1402e18f1682SSrivatsa S. Bhat cpufreq_init_policy(policy); 1403e18f1682SSrivatsa S. Bhat 140496bbbe4aSViresh Kumar if (!recover_policy) { 140508fd8c1cSViresh Kumar policy->user_policy.policy = policy->policy; 140608fd8c1cSViresh Kumar policy->user_policy.governor = policy->governor; 140708fd8c1cSViresh Kumar } 14084e97b631SViresh Kumar up_write(&policy->rwsem); 140908fd8c1cSViresh Kumar 1410038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 14117c45cf31SViresh Kumar 14126eed9404SViresh Kumar up_read(&cpufreq_rwsem); 14136eed9404SViresh Kumar 14147c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 14157c45cf31SViresh Kumar if (cpufreq_driver->ready) 14167c45cf31SViresh Kumar cpufreq_driver->ready(policy); 14177c45cf31SViresh Kumar 14182d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 14191da177e4SLinus Torvalds 14201da177e4SLinus Torvalds return 0; 14211da177e4SLinus Torvalds 14221da177e4SLinus Torvalds err_out_unregister: 1423652ed95dSViresh Kumar err_get_freq: 14247106e02bSPrarit Bhargava up_write(&policy->rwsem); 14257106e02bSPrarit Bhargava 1426da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1427da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 14282eaa3e2dSViresh Kumar err_set_policy_cpu: 14293654c5ccSViresh Kumar cpufreq_policy_free(policy, recover_policy); 14301da177e4SLinus Torvalds nomem_out: 14316eed9404SViresh Kumar up_read(&cpufreq_rwsem); 14326eed9404SViresh Kumar 14331da177e4SLinus Torvalds return ret; 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds 1436*559ed407SRafael J. Wysocki static int __cpufreq_remove_dev_prepare(struct device *dev) 14371da177e4SLinus Torvalds { 14389591becbSViresh Kumar unsigned int cpu = dev->id; 14399591becbSViresh Kumar int ret = 0; 14403a3e9e06SViresh Kumar struct cpufreq_policy *policy; 14411da177e4SLinus Torvalds 1442b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 14431da177e4SLinus Torvalds 1444988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 14453a3e9e06SViresh Kumar if (!policy) { 1446b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 14471da177e4SLinus Torvalds return -EINVAL; 14481da177e4SLinus Torvalds } 14491da177e4SLinus Torvalds 14509c0ebcf7SViresh Kumar if (has_target()) { 14513de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1452*559ed407SRafael J. Wysocki if (ret) 14533de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 1454db5f2995SViresh Kumar } 14551da177e4SLinus Torvalds 14564573237bSViresh Kumar down_write(&policy->rwsem); 14579591becbSViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 14584573237bSViresh Kumar 14599591becbSViresh Kumar if (policy_is_inactive(policy)) { 14609591becbSViresh Kumar if (has_target()) 14614573237bSViresh Kumar strncpy(policy->last_governor, policy->governor->name, 14624573237bSViresh Kumar CPUFREQ_NAME_LEN); 14639591becbSViresh Kumar } else if (cpu == policy->cpu) { 14649591becbSViresh Kumar /* Nominate new CPU */ 14659591becbSViresh Kumar policy->cpu = cpumask_any(policy->cpus); 14669591becbSViresh Kumar } 14674573237bSViresh Kumar up_write(&policy->rwsem); 14681da177e4SLinus Torvalds 14699591becbSViresh Kumar /* Start governor again for active policy */ 14709591becbSViresh Kumar if (!policy_is_inactive(policy)) { 14719591becbSViresh Kumar if (has_target()) { 14729591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 14739591becbSViresh Kumar if (!ret) 14749591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 147587549141SViresh Kumar 14769591becbSViresh Kumar if (ret) 14779591becbSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 14789591becbSViresh Kumar } 14799591becbSViresh Kumar } else if (cpufreq_driver->stop_cpu) { 1480367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 14819591becbSViresh Kumar } 1482b8eed8afSViresh Kumar 14839591becbSViresh Kumar return ret; 1484cedb70afSSrivatsa S. Bhat } 1485cedb70afSSrivatsa S. Bhat 1486*559ed407SRafael J. Wysocki static int __cpufreq_remove_dev_finish(struct device *dev) 1487cedb70afSSrivatsa S. Bhat { 1488988bed09SViresh Kumar unsigned int cpu = dev->id; 1489cedb70afSSrivatsa S. Bhat int ret; 14909591becbSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1491cedb70afSSrivatsa S. Bhat 1492cedb70afSSrivatsa S. Bhat if (!policy) { 1493cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 1494cedb70afSSrivatsa S. Bhat return -EINVAL; 1495cedb70afSSrivatsa S. Bhat } 1496cedb70afSSrivatsa S. Bhat 14979591becbSViresh Kumar /* Only proceed for inactive policies */ 14989591becbSViresh Kumar if (!policy_is_inactive(policy)) 149987549141SViresh Kumar return 0; 150087549141SViresh Kumar 150187549141SViresh Kumar /* If cpu is last user of policy, free policy */ 150287549141SViresh Kumar if (has_target()) { 150387549141SViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1504*559ed407SRafael J. Wysocki if (ret) 150587549141SViresh Kumar pr_err("%s: Failed to exit governor\n", __func__); 15063de9bdebSViresh Kumar } 15072a998599SRafael J. Wysocki 15088414809cSSrivatsa S. Bhat /* 15098414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 15108414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 15118414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 15128414809cSSrivatsa S. Bhat */ 15131c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 15143a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 151527ecddc2SJacob Shin 15161da177e4SLinus Torvalds return 0; 15171da177e4SLinus Torvalds } 15181da177e4SLinus Torvalds 1519cedb70afSSrivatsa S. Bhat /** 152027a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1521cedb70afSSrivatsa S. Bhat * 1522cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1523cedb70afSSrivatsa S. Bhat */ 15248a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 15255a01f2e8SVenkatesh Pallipadi { 15268a25a2fdSKay Sievers unsigned int cpu = dev->id; 152787549141SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 152887549141SViresh Kumar 152987549141SViresh Kumar if (!policy) 1530ec28297aSVenki Pallipadi return 0; 1531ec28297aSVenki Pallipadi 1532*559ed407SRafael J. Wysocki if (cpu_online(cpu)) { 1533*559ed407SRafael J. Wysocki __cpufreq_remove_dev_prepare(dev); 1534*559ed407SRafael J. Wysocki __cpufreq_remove_dev_finish(dev); 153587549141SViresh Kumar } 153687549141SViresh Kumar 1537*559ed407SRafael J. Wysocki cpumask_clear_cpu(cpu, policy->real_cpus); 1538*559ed407SRafael J. Wysocki 1539*559ed407SRafael J. Wysocki if (cpumask_empty(policy->real_cpus)) { 15403654c5ccSViresh Kumar cpufreq_policy_free(policy, true); 154187549141SViresh Kumar return 0; 154287549141SViresh Kumar } 154387549141SViresh Kumar 1544*559ed407SRafael J. Wysocki if (cpu != policy->kobj_cpu) { 1545*559ed407SRafael J. Wysocki remove_cpu_dev_symlink(policy, cpu); 1546*559ed407SRafael J. Wysocki } else { 1547*559ed407SRafael J. Wysocki /* 1548*559ed407SRafael J. Wysocki * The CPU owning the policy object is going away. Move it to 1549*559ed407SRafael J. Wysocki * another suitable CPU. 1550*559ed407SRafael J. Wysocki */ 1551*559ed407SRafael J. Wysocki unsigned int new_cpu = cpumask_first(policy->real_cpus); 1552*559ed407SRafael J. Wysocki struct device *new_dev = get_cpu_device(new_cpu); 155327a862e9SViresh Kumar 1554*559ed407SRafael J. Wysocki dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu); 155527a862e9SViresh Kumar 1556*559ed407SRafael J. Wysocki sysfs_remove_link(&new_dev->kobj, "cpufreq"); 1557*559ed407SRafael J. Wysocki policy->kobj_cpu = new_cpu; 1558*559ed407SRafael J. Wysocki WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); 1559*559ed407SRafael J. Wysocki } 1560*559ed407SRafael J. Wysocki 1561*559ed407SRafael J. Wysocki return 0; 15625a01f2e8SVenkatesh Pallipadi } 15635a01f2e8SVenkatesh Pallipadi 156465f27f38SDavid Howells static void handle_update(struct work_struct *work) 15651da177e4SLinus Torvalds { 156665f27f38SDavid Howells struct cpufreq_policy *policy = 156765f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 156865f27f38SDavid Howells unsigned int cpu = policy->cpu; 15692d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 15701da177e4SLinus Torvalds cpufreq_update_policy(cpu); 15711da177e4SLinus Torvalds } 15721da177e4SLinus Torvalds 15731da177e4SLinus Torvalds /** 1574bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1575bb176f7dSViresh Kumar * in deep trouble. 1576a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15771da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15781da177e4SLinus Torvalds * 157929464f28SDave Jones * We adjust to current frequency first, and need to clean up later. 158029464f28SDave Jones * So either call to cpufreq_update_policy() or schedule handle_update()). 15811da177e4SLinus Torvalds */ 1582a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1583e08f5f5bSGautham R Shenoy unsigned int new_freq) 15841da177e4SLinus Torvalds { 15851da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1586b43a7ffbSViresh Kumar 1587e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1588a1e1dc41SViresh Kumar policy->cur, new_freq); 15891da177e4SLinus Torvalds 1590a1e1dc41SViresh Kumar freqs.old = policy->cur; 15911da177e4SLinus Torvalds freqs.new = new_freq; 1592b43a7ffbSViresh Kumar 15938fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 15948fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 15951da177e4SLinus Torvalds } 15961da177e4SLinus Torvalds 15971da177e4SLinus Torvalds /** 15984ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 159995235ca2SVenkatesh Pallipadi * @cpu: CPU number 160095235ca2SVenkatesh Pallipadi * 160195235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 160295235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 160395235ca2SVenkatesh Pallipadi */ 160495235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 160595235ca2SVenkatesh Pallipadi { 16069e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1607e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 160895235ca2SVenkatesh Pallipadi 16091c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 16101c3d85ddSRafael J. Wysocki return cpufreq_driver->get(cpu); 16119e21ba8bSDirk Brandewie 16129e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 161395235ca2SVenkatesh Pallipadi if (policy) { 1614e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 161595235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 161695235ca2SVenkatesh Pallipadi } 161795235ca2SVenkatesh Pallipadi 16184d34a67dSDave Jones return ret_freq; 161995235ca2SVenkatesh Pallipadi } 162095235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 162195235ca2SVenkatesh Pallipadi 16223d737108SJesse Barnes /** 16233d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 16243d737108SJesse Barnes * @cpu: CPU number 16253d737108SJesse Barnes * 16263d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 16273d737108SJesse Barnes */ 16283d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 16293d737108SJesse Barnes { 16303d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16313d737108SJesse Barnes unsigned int ret_freq = 0; 16323d737108SJesse Barnes 16333d737108SJesse Barnes if (policy) { 16343d737108SJesse Barnes ret_freq = policy->max; 16353d737108SJesse Barnes cpufreq_cpu_put(policy); 16363d737108SJesse Barnes } 16373d737108SJesse Barnes 16383d737108SJesse Barnes return ret_freq; 16393d737108SJesse Barnes } 16403d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 16413d737108SJesse Barnes 1642d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 16431da177e4SLinus Torvalds { 1644e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 16451da177e4SLinus Torvalds 16461c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 16474d34a67dSDave Jones return ret_freq; 16481da177e4SLinus Torvalds 1649d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 16501da177e4SLinus Torvalds 165111e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 165211e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) 165311e584cfSViresh Kumar return ret_freq; 165411e584cfSViresh Kumar 1655e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 16561c3d85ddSRafael J. Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1657e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1658e08f5f5bSGautham R Shenoy saved value exists */ 1659e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1660a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 16611da177e4SLinus Torvalds schedule_work(&policy->update); 16621da177e4SLinus Torvalds } 16631da177e4SLinus Torvalds } 16641da177e4SLinus Torvalds 16654d34a67dSDave Jones return ret_freq; 16665a01f2e8SVenkatesh Pallipadi } 16671da177e4SLinus Torvalds 16685a01f2e8SVenkatesh Pallipadi /** 16695a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 16705a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 16715a01f2e8SVenkatesh Pallipadi * 16725a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16735a01f2e8SVenkatesh Pallipadi */ 16745a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16755a01f2e8SVenkatesh Pallipadi { 1676999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16775a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16785a01f2e8SVenkatesh Pallipadi 1679999976e0SAaron Plattner if (policy) { 1680ad7722daSviresh kumar down_read(&policy->rwsem); 1681d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1682ad7722daSviresh kumar up_read(&policy->rwsem); 1683999976e0SAaron Plattner 1684999976e0SAaron Plattner cpufreq_cpu_put(policy); 1685999976e0SAaron Plattner } 16866eed9404SViresh Kumar 16874d34a67dSDave Jones return ret_freq; 16881da177e4SLinus Torvalds } 16891da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 16901da177e4SLinus Torvalds 16918a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 16928a25a2fdSKay Sievers .name = "cpufreq", 16938a25a2fdSKay Sievers .subsys = &cpu_subsys, 16948a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 16958a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1696e00e56dfSRafael J. Wysocki }; 1697e00e56dfSRafael J. Wysocki 1698e28867eaSViresh Kumar /* 1699e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1700e28867eaSViresh Kumar * during suspend.. 170142d4dc3fSBenjamin Herrenschmidt */ 1702e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 170342d4dc3fSBenjamin Herrenschmidt { 1704e28867eaSViresh Kumar int ret; 17054bc5d341SDave Jones 1706e28867eaSViresh Kumar if (!policy->suspend_freq) { 1707e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1708e28867eaSViresh Kumar return -EINVAL; 170942d4dc3fSBenjamin Herrenschmidt } 171042d4dc3fSBenjamin Herrenschmidt 1711e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1712e28867eaSViresh Kumar policy->suspend_freq); 1713e28867eaSViresh Kumar 1714e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1715e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1716e28867eaSViresh Kumar if (ret) 1717e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. err: %d\n", 1718e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1719e28867eaSViresh Kumar 1720c9060494SDave Jones return ret; 172142d4dc3fSBenjamin Herrenschmidt } 1722e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 172342d4dc3fSBenjamin Herrenschmidt 172442d4dc3fSBenjamin Herrenschmidt /** 17252f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 17261da177e4SLinus Torvalds * 17272f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 17282f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 17292f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 17302f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 17311da177e4SLinus Torvalds */ 17322f0aea93SViresh Kumar void cpufreq_suspend(void) 17331da177e4SLinus Torvalds { 17343a3e9e06SViresh Kumar struct cpufreq_policy *policy; 17351da177e4SLinus Torvalds 17362f0aea93SViresh Kumar if (!cpufreq_driver) 1737e00e56dfSRafael J. Wysocki return; 17381da177e4SLinus Torvalds 17392f0aea93SViresh Kumar if (!has_target()) 1740b1b12babSViresh Kumar goto suspend; 17411da177e4SLinus Torvalds 17422f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 17432f0aea93SViresh Kumar 1744f963735aSViresh Kumar for_each_active_policy(policy) { 17452f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 17462f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 17472f0aea93SViresh Kumar __func__, policy); 17482f0aea93SViresh Kumar else if (cpufreq_driver->suspend 17492f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 17502f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 17512f0aea93SViresh Kumar policy); 17521da177e4SLinus Torvalds } 1753b1b12babSViresh Kumar 1754b1b12babSViresh Kumar suspend: 1755b1b12babSViresh Kumar cpufreq_suspended = true; 17561da177e4SLinus Torvalds } 17571da177e4SLinus Torvalds 17581da177e4SLinus Torvalds /** 17592f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 17601da177e4SLinus Torvalds * 17612f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 17622f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 17631da177e4SLinus Torvalds */ 17642f0aea93SViresh Kumar void cpufreq_resume(void) 17651da177e4SLinus Torvalds { 17661da177e4SLinus Torvalds struct cpufreq_policy *policy; 17671da177e4SLinus Torvalds 17682f0aea93SViresh Kumar if (!cpufreq_driver) 17691da177e4SLinus Torvalds return; 17701da177e4SLinus Torvalds 17718e30444eSLan Tianyu cpufreq_suspended = false; 17728e30444eSLan Tianyu 17732f0aea93SViresh Kumar if (!has_target()) 17742f0aea93SViresh Kumar return; 17751da177e4SLinus Torvalds 17762f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17772f0aea93SViresh Kumar 1778f963735aSViresh Kumar for_each_active_policy(policy) { 17790c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17800c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 17810c5aa405SViresh Kumar policy); 17820c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 17832f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 17842f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 17852f0aea93SViresh Kumar __func__, policy); 1786c75de0acSViresh Kumar } 17872f0aea93SViresh Kumar 17882f0aea93SViresh Kumar /* 1789c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1790c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1791c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 17922f0aea93SViresh Kumar */ 1793c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1794c75de0acSViresh Kumar if (WARN_ON(!policy)) 1795c75de0acSViresh Kumar return; 1796c75de0acSViresh Kumar 17973a3e9e06SViresh Kumar schedule_work(&policy->update); 17981da177e4SLinus Torvalds } 17991da177e4SLinus Torvalds 18009d95046eSBorislav Petkov /** 18019d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 18029d95046eSBorislav Petkov * 18039d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 18049d95046eSBorislav Petkov * or NULL, if none. 18059d95046eSBorislav Petkov */ 18069d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 18079d95046eSBorislav Petkov { 18081c3d85ddSRafael J. Wysocki if (cpufreq_driver) 18091c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 18101c3d85ddSRafael J. Wysocki 18111c3d85ddSRafael J. Wysocki return NULL; 18129d95046eSBorislav Petkov } 18139d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 18141da177e4SLinus Torvalds 181551315cdfSThomas Petazzoni /** 181651315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 181751315cdfSThomas Petazzoni * 181851315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 181951315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 182051315cdfSThomas Petazzoni */ 182151315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 182251315cdfSThomas Petazzoni { 182351315cdfSThomas Petazzoni if (cpufreq_driver) 182451315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 182551315cdfSThomas Petazzoni 182651315cdfSThomas Petazzoni return NULL; 182751315cdfSThomas Petazzoni } 182851315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 182951315cdfSThomas Petazzoni 18301da177e4SLinus Torvalds /********************************************************************* 18311da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 18321da177e4SLinus Torvalds *********************************************************************/ 18331da177e4SLinus Torvalds 18341da177e4SLinus Torvalds /** 18351da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 18361da177e4SLinus Torvalds * @nb: notifier function to register 18371da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18381da177e4SLinus Torvalds * 18391da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 18401da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 18411da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 18421da177e4SLinus Torvalds * changes in cpufreq policy. 18431da177e4SLinus Torvalds * 18441da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1845e041c683SAlan Stern * blocking_notifier_chain_register. 18461da177e4SLinus Torvalds */ 18471da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 18481da177e4SLinus Torvalds { 18491da177e4SLinus Torvalds int ret; 18501da177e4SLinus Torvalds 1851d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1852d5aaffa9SDirk Brandewie return -EINVAL; 1853d5aaffa9SDirk Brandewie 185474212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 185574212ca4SCesar Eduardo Barros 18561da177e4SLinus Torvalds switch (list) { 18571da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1858b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1859e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18601da177e4SLinus Torvalds break; 18611da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1862e041c683SAlan Stern ret = blocking_notifier_chain_register( 1863e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18641da177e4SLinus Torvalds break; 18651da177e4SLinus Torvalds default: 18661da177e4SLinus Torvalds ret = -EINVAL; 18671da177e4SLinus Torvalds } 18681da177e4SLinus Torvalds 18691da177e4SLinus Torvalds return ret; 18701da177e4SLinus Torvalds } 18711da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18721da177e4SLinus Torvalds 18731da177e4SLinus Torvalds /** 18741da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18751da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18761da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18771da177e4SLinus Torvalds * 18781da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18791da177e4SLinus Torvalds * 18801da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1881e041c683SAlan Stern * blocking_notifier_chain_unregister. 18821da177e4SLinus Torvalds */ 18831da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 18841da177e4SLinus Torvalds { 18851da177e4SLinus Torvalds int ret; 18861da177e4SLinus Torvalds 1887d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1888d5aaffa9SDirk Brandewie return -EINVAL; 1889d5aaffa9SDirk Brandewie 18901da177e4SLinus Torvalds switch (list) { 18911da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1892b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1893e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18941da177e4SLinus Torvalds break; 18951da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1896e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1897e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18981da177e4SLinus Torvalds break; 18991da177e4SLinus Torvalds default: 19001da177e4SLinus Torvalds ret = -EINVAL; 19011da177e4SLinus Torvalds } 19021da177e4SLinus Torvalds 19031da177e4SLinus Torvalds return ret; 19041da177e4SLinus Torvalds } 19051da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 19061da177e4SLinus Torvalds 19071da177e4SLinus Torvalds 19081da177e4SLinus Torvalds /********************************************************************* 19091da177e4SLinus Torvalds * GOVERNORS * 19101da177e4SLinus Torvalds *********************************************************************/ 19111da177e4SLinus Torvalds 19121c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 19131c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 19141c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 19151c03a2d0SViresh Kumar { 19161c03a2d0SViresh Kumar int ret; 19171c03a2d0SViresh Kumar 19181c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 19191c03a2d0SViresh Kumar 19201c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 19211c03a2d0SViresh Kumar if (!freqs->new) 19221c03a2d0SViresh Kumar return 0; 19231c03a2d0SViresh Kumar 19241c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 19251c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 19261c03a2d0SViresh Kumar 19271c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 19281c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 19291c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 19301c03a2d0SViresh Kumar 19311c03a2d0SViresh Kumar if (ret) 19321c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 19331c03a2d0SViresh Kumar __func__, ret); 19341c03a2d0SViresh Kumar 19351c03a2d0SViresh Kumar return ret; 19361c03a2d0SViresh Kumar } 19371c03a2d0SViresh Kumar 19388d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 19398d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 19408d65775dSViresh Kumar { 19411c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 19421c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 19438d65775dSViresh Kumar int retval = -EINVAL; 19448d65775dSViresh Kumar bool notify; 19458d65775dSViresh Kumar 19468d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 19478d65775dSViresh Kumar if (notify) { 19481c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 19491c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 19501c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 19511c03a2d0SViresh Kumar if (retval) 19521c03a2d0SViresh Kumar return retval; 19538d65775dSViresh Kumar 19541c03a2d0SViresh Kumar intermediate_freq = freqs.new; 19551c03a2d0SViresh Kumar /* Set old freq to intermediate */ 19561c03a2d0SViresh Kumar if (intermediate_freq) 19571c03a2d0SViresh Kumar freqs.old = freqs.new; 19581c03a2d0SViresh Kumar } 19591c03a2d0SViresh Kumar 19601c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 19618d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 19628d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 19638d65775dSViresh Kumar 19648d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19658d65775dSViresh Kumar } 19668d65775dSViresh Kumar 19678d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 19688d65775dSViresh Kumar if (retval) 19698d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 19708d65775dSViresh Kumar retval); 19718d65775dSViresh Kumar 19721c03a2d0SViresh Kumar if (notify) { 19738d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 19748d65775dSViresh Kumar 19751c03a2d0SViresh Kumar /* 19761c03a2d0SViresh Kumar * Failed after setting to intermediate freq? Driver should have 19771c03a2d0SViresh Kumar * reverted back to initial frequency and so should we. Check 19781c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 197958405af6SShailendra Verma * case we haven't switched to intermediate freq at all. 19801c03a2d0SViresh Kumar */ 19811c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 19821c03a2d0SViresh Kumar freqs.old = intermediate_freq; 19831c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 19841c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19851c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 19861c03a2d0SViresh Kumar } 19871c03a2d0SViresh Kumar } 19881c03a2d0SViresh Kumar 19898d65775dSViresh Kumar return retval; 19908d65775dSViresh Kumar } 19918d65775dSViresh Kumar 19921da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 19931da177e4SLinus Torvalds unsigned int target_freq, 19941da177e4SLinus Torvalds unsigned int relation) 19951da177e4SLinus Torvalds { 19967249924eSViresh Kumar unsigned int old_target_freq = target_freq; 19978d65775dSViresh Kumar int retval = -EINVAL; 1998c32b6b8eSAshok Raj 1999a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2000a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2001a7b422cdSKonrad Rzeszutek Wilk 20027249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 20037249924eSViresh Kumar if (target_freq > policy->max) 20047249924eSViresh Kumar target_freq = policy->max; 20057249924eSViresh Kumar if (target_freq < policy->min) 20067249924eSViresh Kumar target_freq = policy->min; 20077249924eSViresh Kumar 20087249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 20097249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 20105a1c0228SViresh Kumar 20119c0ebcf7SViresh Kumar /* 20129c0ebcf7SViresh Kumar * This might look like a redundant call as we are checking it again 20139c0ebcf7SViresh Kumar * after finding index. But it is left intentionally for cases where 20149c0ebcf7SViresh Kumar * exactly same freq is called again and so we can save on few function 20159c0ebcf7SViresh Kumar * calls. 20169c0ebcf7SViresh Kumar */ 20175a1c0228SViresh Kumar if (target_freq == policy->cur) 20185a1c0228SViresh Kumar return 0; 20195a1c0228SViresh Kumar 20201c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 20211c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 20221c03a2d0SViresh Kumar 20231c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 20241c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 20259c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 20269c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 20279c0ebcf7SViresh Kumar int index; 202890d45d17SAshok Raj 20299c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 20309c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 20319c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 20329c0ebcf7SViresh Kumar goto out; 20339c0ebcf7SViresh Kumar } 20349c0ebcf7SViresh Kumar 20359c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 20369c0ebcf7SViresh Kumar target_freq, relation, &index); 20379c0ebcf7SViresh Kumar if (unlikely(retval)) { 20389c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 20399c0ebcf7SViresh Kumar goto out; 20409c0ebcf7SViresh Kumar } 20419c0ebcf7SViresh Kumar 2042d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 20439c0ebcf7SViresh Kumar retval = 0; 2044d4019f0aSViresh Kumar goto out; 2045d4019f0aSViresh Kumar } 2046d4019f0aSViresh Kumar 20478d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 20489c0ebcf7SViresh Kumar } 20499c0ebcf7SViresh Kumar 20509c0ebcf7SViresh Kumar out: 20511da177e4SLinus Torvalds return retval; 20521da177e4SLinus Torvalds } 20531da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 20541da177e4SLinus Torvalds 20551da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 20561da177e4SLinus Torvalds unsigned int target_freq, 20571da177e4SLinus Torvalds unsigned int relation) 20581da177e4SLinus Torvalds { 2059f1829e4aSJulia Lawall int ret = -EINVAL; 20601da177e4SLinus Torvalds 2061ad7722daSviresh kumar down_write(&policy->rwsem); 20621da177e4SLinus Torvalds 20631da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 20641da177e4SLinus Torvalds 2065ad7722daSviresh kumar up_write(&policy->rwsem); 20661da177e4SLinus Torvalds 20671da177e4SLinus Torvalds return ret; 20681da177e4SLinus Torvalds } 20691da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 20701da177e4SLinus Torvalds 2071e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 2072e08f5f5bSGautham R Shenoy unsigned int event) 20731da177e4SLinus Torvalds { 2074cc993cabSDave Jones int ret; 20756afde10cSThomas Renninger 20766afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 20776afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 20786afde10cSThomas Renninger That this is the case is already ensured in Kconfig 20796afde10cSThomas Renninger */ 20806afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 20816afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 20826afde10cSThomas Renninger #else 20836afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 20846afde10cSThomas Renninger #endif 20851c256245SThomas Renninger 20862f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 20872f0aea93SViresh Kumar if (cpufreq_suspended) 20882f0aea93SViresh Kumar return 0; 2089cb57720bSEthan Zhao /* 2090cb57720bSEthan Zhao * Governor might not be initiated here if ACPI _PPC changed 2091cb57720bSEthan Zhao * notification happened, so check it. 2092cb57720bSEthan Zhao */ 2093cb57720bSEthan Zhao if (!policy->governor) 2094cb57720bSEthan Zhao return -EINVAL; 20952f0aea93SViresh Kumar 20961c256245SThomas Renninger if (policy->governor->max_transition_latency && 20971c256245SThomas Renninger policy->cpuinfo.transition_latency > 20981c256245SThomas Renninger policy->governor->max_transition_latency) { 20996afde10cSThomas Renninger if (!gov) 21006afde10cSThomas Renninger return -EINVAL; 21016afde10cSThomas Renninger else { 2102e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 2103e837f9b5SJoe Perches policy->governor->name, gov->name); 21041c256245SThomas Renninger policy->governor = gov; 21051c256245SThomas Renninger } 21066afde10cSThomas Renninger } 21071da177e4SLinus Torvalds 2108fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 21091da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 21101da177e4SLinus Torvalds return -EINVAL; 21111da177e4SLinus Torvalds 21122d06d8c4SDominik Brodowski pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2113e08f5f5bSGautham R Shenoy policy->cpu, event); 211495731ebbSXiaoguang Chen 211595731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 211656d07db2SSrivatsa S. Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2117f73d3933SViresh Kumar || (!policy->governor_enabled 2118f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 211995731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 212095731ebbSXiaoguang Chen return -EBUSY; 212195731ebbSXiaoguang Chen } 212295731ebbSXiaoguang Chen 212395731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 212495731ebbSXiaoguang Chen policy->governor_enabled = false; 212595731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 212695731ebbSXiaoguang Chen policy->governor_enabled = true; 212795731ebbSXiaoguang Chen 212895731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 212995731ebbSXiaoguang Chen 21301da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 21311da177e4SLinus Torvalds 21324d5dcc42SViresh Kumar if (!ret) { 21334d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 21348e53695fSViresh Kumar policy->governor->initialized++; 21354d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 21368e53695fSViresh Kumar policy->governor->initialized--; 213795731ebbSXiaoguang Chen } else { 213895731ebbSXiaoguang Chen /* Restore original values */ 213995731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 214095731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 214195731ebbSXiaoguang Chen policy->governor_enabled = true; 214295731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 214395731ebbSXiaoguang Chen policy->governor_enabled = false; 214495731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 21454d5dcc42SViresh Kumar } 2146b394058fSViresh Kumar 2147fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2148fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 21491da177e4SLinus Torvalds module_put(policy->governor->owner); 21501da177e4SLinus Torvalds 21511da177e4SLinus Torvalds return ret; 21521da177e4SLinus Torvalds } 21531da177e4SLinus Torvalds 21541da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 21551da177e4SLinus Torvalds { 21563bcb09a3SJeremy Fitzhardinge int err; 21571da177e4SLinus Torvalds 21581da177e4SLinus Torvalds if (!governor) 21591da177e4SLinus Torvalds return -EINVAL; 21601da177e4SLinus Torvalds 2161a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2162a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2163a7b422cdSKonrad Rzeszutek Wilk 21643fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21651da177e4SLinus Torvalds 2166b394058fSViresh Kumar governor->initialized = 0; 21673bcb09a3SJeremy Fitzhardinge err = -EBUSY; 216842f91fa1SViresh Kumar if (!find_governor(governor->name)) { 21693bcb09a3SJeremy Fitzhardinge err = 0; 21701da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 21713bcb09a3SJeremy Fitzhardinge } 21721da177e4SLinus Torvalds 21733fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21743bcb09a3SJeremy Fitzhardinge return err; 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 21791da177e4SLinus Torvalds { 21804573237bSViresh Kumar struct cpufreq_policy *policy; 21814573237bSViresh Kumar unsigned long flags; 218290e41bacSPrarit Bhargava 21831da177e4SLinus Torvalds if (!governor) 21841da177e4SLinus Torvalds return; 21851da177e4SLinus Torvalds 2186a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2187a7b422cdSKonrad Rzeszutek Wilk return; 2188a7b422cdSKonrad Rzeszutek Wilk 21894573237bSViresh Kumar /* clear last_governor for all inactive policies */ 21904573237bSViresh Kumar read_lock_irqsave(&cpufreq_driver_lock, flags); 21914573237bSViresh Kumar for_each_inactive_policy(policy) { 219218bf3a12SViresh Kumar if (!strcmp(policy->last_governor, governor->name)) { 219318bf3a12SViresh Kumar policy->governor = NULL; 21944573237bSViresh Kumar strcpy(policy->last_governor, "\0"); 219590e41bacSPrarit Bhargava } 219618bf3a12SViresh Kumar } 21974573237bSViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 219890e41bacSPrarit Bhargava 21993fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 22001da177e4SLinus Torvalds list_del(&governor->governor_list); 22013fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 22021da177e4SLinus Torvalds return; 22031da177e4SLinus Torvalds } 22041da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 22051da177e4SLinus Torvalds 22061da177e4SLinus Torvalds 22071da177e4SLinus Torvalds /********************************************************************* 22081da177e4SLinus Torvalds * POLICY INTERFACE * 22091da177e4SLinus Torvalds *********************************************************************/ 22101da177e4SLinus Torvalds 22111da177e4SLinus Torvalds /** 22121da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 221329464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 221429464f28SDave Jones * is written 22151da177e4SLinus Torvalds * 22161da177e4SLinus Torvalds * Reads the current cpufreq policy. 22171da177e4SLinus Torvalds */ 22181da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 22191da177e4SLinus Torvalds { 22201da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 22211da177e4SLinus Torvalds if (!policy) 22221da177e4SLinus Torvalds return -EINVAL; 22231da177e4SLinus Torvalds 22241da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 22251da177e4SLinus Torvalds if (!cpu_policy) 22261da177e4SLinus Torvalds return -EINVAL; 22271da177e4SLinus Torvalds 2228d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 22291da177e4SLinus Torvalds 22301da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 22311da177e4SLinus Torvalds return 0; 22321da177e4SLinus Torvalds } 22331da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 22341da177e4SLinus Torvalds 2235153d7f3fSArjan van de Ven /* 2236037ce839SViresh Kumar * policy : current policy. 2237037ce839SViresh Kumar * new_policy: policy to be set. 2238153d7f3fSArjan van de Ven */ 2239037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 22403a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 22411da177e4SLinus Torvalds { 2242d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2243d9a789c7SRafael J. Wysocki int ret; 22441da177e4SLinus Torvalds 2245e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2246e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 22471da177e4SLinus Torvalds 2248d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 22491da177e4SLinus Torvalds 2250d9a789c7SRafael J. Wysocki if (new_policy->min > policy->max || new_policy->max < policy->min) 2251d9a789c7SRafael J. Wysocki return -EINVAL; 22529c9a43edSMattia Dongili 22531da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 22543a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 22551da177e4SLinus Torvalds if (ret) 2256d9a789c7SRafael J. Wysocki return ret; 22571da177e4SLinus Torvalds 22581da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2259e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22603a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 22611da177e4SLinus Torvalds 22621da177e4SLinus Torvalds /* adjust if necessary - hardware incompatibility*/ 2263e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22643a3e9e06SViresh Kumar CPUFREQ_INCOMPATIBLE, new_policy); 22651da177e4SLinus Torvalds 2266bb176f7dSViresh Kumar /* 2267bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2268bb176f7dSViresh Kumar * different to the first one 2269bb176f7dSViresh Kumar */ 22703a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2271e041c683SAlan Stern if (ret) 2272d9a789c7SRafael J. Wysocki return ret; 22731da177e4SLinus Torvalds 22741da177e4SLinus Torvalds /* notification of the new policy */ 2275e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22763a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 22771da177e4SLinus Torvalds 22783a3e9e06SViresh Kumar policy->min = new_policy->min; 22793a3e9e06SViresh Kumar policy->max = new_policy->max; 22801da177e4SLinus Torvalds 22812d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 22823a3e9e06SViresh Kumar policy->min, policy->max); 22831da177e4SLinus Torvalds 22841c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 22853a3e9e06SViresh Kumar policy->policy = new_policy->policy; 22862d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2287d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2288d9a789c7SRafael J. Wysocki } 2289d9a789c7SRafael J. Wysocki 2290d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2291d9a789c7SRafael J. Wysocki goto out; 22921da177e4SLinus Torvalds 22932d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 22941da177e4SLinus Torvalds 2295d9a789c7SRafael J. Wysocki /* save old, working values */ 2296d9a789c7SRafael J. Wysocki old_gov = policy->governor; 22971da177e4SLinus Torvalds /* end old governor */ 2298d9a789c7SRafael J. Wysocki if (old_gov) { 22993a3e9e06SViresh Kumar __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 2300ad7722daSviresh kumar up_write(&policy->rwsem); 2301d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2302ad7722daSviresh kumar down_write(&policy->rwsem); 23037bd353a9SViresh Kumar } 23041da177e4SLinus Torvalds 23051da177e4SLinus Torvalds /* start new governor */ 23063a3e9e06SViresh Kumar policy->governor = new_policy->governor; 23073a3e9e06SViresh Kumar if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { 2308d9a789c7SRafael J. Wysocki if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) 2309d9a789c7SRafael J. Wysocki goto out; 2310d9a789c7SRafael J. Wysocki 2311ad7722daSviresh kumar up_write(&policy->rwsem); 2312d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2313ad7722daSviresh kumar down_write(&policy->rwsem); 2314955ef483SViresh Kumar } 23157bd353a9SViresh Kumar 23161da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2317d9a789c7SRafael J. Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 23181da177e4SLinus Torvalds if (old_gov) { 23193a3e9e06SViresh Kumar policy->governor = old_gov; 2320d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 2321d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds 2324d9a789c7SRafael J. Wysocki return -EINVAL; 2325d9a789c7SRafael J. Wysocki 2326d9a789c7SRafael J. Wysocki out: 2327d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2328d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 23291da177e4SLinus Torvalds } 23301da177e4SLinus Torvalds 23311da177e4SLinus Torvalds /** 23321da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 23331da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 23341da177e4SLinus Torvalds * 233525985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 23361da177e4SLinus Torvalds * at different times. 23371da177e4SLinus Torvalds */ 23381da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 23391da177e4SLinus Torvalds { 23403a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 23413a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2342f1829e4aSJulia Lawall int ret; 23431da177e4SLinus Torvalds 2344fefa8ff8SAaron Plattner if (!policy) 2345fefa8ff8SAaron Plattner return -ENODEV; 23461da177e4SLinus Torvalds 2347ad7722daSviresh kumar down_write(&policy->rwsem); 23481da177e4SLinus Torvalds 23492d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2350d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 23513a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 23523a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 23533a3e9e06SViresh Kumar new_policy.policy = policy->user_policy.policy; 23543a3e9e06SViresh Kumar new_policy.governor = policy->user_policy.governor; 23551da177e4SLinus Torvalds 2356bb176f7dSViresh Kumar /* 2357bb176f7dSViresh Kumar * BIOS might change freq behind our back 2358bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2359bb176f7dSViresh Kumar */ 23602ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 23613a3e9e06SViresh Kumar new_policy.cur = cpufreq_driver->get(cpu); 2362bd0fa9bbSViresh Kumar if (WARN_ON(!new_policy.cur)) { 2363bd0fa9bbSViresh Kumar ret = -EIO; 2364fefa8ff8SAaron Plattner goto unlock; 2365bd0fa9bbSViresh Kumar } 2366bd0fa9bbSViresh Kumar 23673a3e9e06SViresh Kumar if (!policy->cur) { 2368e837f9b5SJoe Perches pr_debug("Driver did not initialize current freq\n"); 23693a3e9e06SViresh Kumar policy->cur = new_policy.cur; 2370a85f7bd3SThomas Renninger } else { 23719c0ebcf7SViresh Kumar if (policy->cur != new_policy.cur && has_target()) 2372a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, new_policy.cur); 23730961dd0dSThomas Renninger } 2374a85f7bd3SThomas Renninger } 23750961dd0dSThomas Renninger 2376037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 23771da177e4SLinus Torvalds 2378fefa8ff8SAaron Plattner unlock: 2379ad7722daSviresh kumar up_write(&policy->rwsem); 23805a01f2e8SVenkatesh Pallipadi 23813a3e9e06SViresh Kumar cpufreq_cpu_put(policy); 23821da177e4SLinus Torvalds return ret; 23831da177e4SLinus Torvalds } 23841da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy); 23851da177e4SLinus Torvalds 23862760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb, 2387c32b6b8eSAshok Raj unsigned long action, void *hcpu) 2388c32b6b8eSAshok Raj { 2389c32b6b8eSAshok Raj unsigned int cpu = (unsigned long)hcpu; 23908a25a2fdSKay Sievers struct device *dev; 2391c32b6b8eSAshok Raj 23928a25a2fdSKay Sievers dev = get_cpu_device(cpu); 23938a25a2fdSKay Sievers if (dev) { 23945302c3fbSSrivatsa S. Bhat switch (action & ~CPU_TASKS_FROZEN) { 2395c32b6b8eSAshok Raj case CPU_ONLINE: 239623faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2397c32b6b8eSAshok Raj break; 23985302c3fbSSrivatsa S. Bhat 2399c32b6b8eSAshok Raj case CPU_DOWN_PREPARE: 2400*559ed407SRafael J. Wysocki __cpufreq_remove_dev_prepare(dev); 24011aee40acSSrivatsa S. Bhat break; 24021aee40acSSrivatsa S. Bhat 24031aee40acSSrivatsa S. Bhat case CPU_POST_DEAD: 2404*559ed407SRafael J. Wysocki __cpufreq_remove_dev_finish(dev); 2405c32b6b8eSAshok Raj break; 24065302c3fbSSrivatsa S. Bhat 24075a01f2e8SVenkatesh Pallipadi case CPU_DOWN_FAILED: 240823faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2409c32b6b8eSAshok Raj break; 2410c32b6b8eSAshok Raj } 2411c32b6b8eSAshok Raj } 2412c32b6b8eSAshok Raj return NOTIFY_OK; 2413c32b6b8eSAshok Raj } 2414c32b6b8eSAshok Raj 24159c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = { 2416c32b6b8eSAshok Raj .notifier_call = cpufreq_cpu_callback, 2417c32b6b8eSAshok Raj }; 24181da177e4SLinus Torvalds 24191da177e4SLinus Torvalds /********************************************************************* 24206f19efc0SLukasz Majewski * BOOST * 24216f19efc0SLukasz Majewski *********************************************************************/ 24226f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state) 24236f19efc0SLukasz Majewski { 24246f19efc0SLukasz Majewski struct cpufreq_frequency_table *freq_table; 24256f19efc0SLukasz Majewski struct cpufreq_policy *policy; 24266f19efc0SLukasz Majewski int ret = -EINVAL; 24276f19efc0SLukasz Majewski 2428f963735aSViresh Kumar for_each_active_policy(policy) { 24296f19efc0SLukasz Majewski freq_table = cpufreq_frequency_get_table(policy->cpu); 24306f19efc0SLukasz Majewski if (freq_table) { 24316f19efc0SLukasz Majewski ret = cpufreq_frequency_table_cpuinfo(policy, 24326f19efc0SLukasz Majewski freq_table); 24336f19efc0SLukasz Majewski if (ret) { 24346f19efc0SLukasz Majewski pr_err("%s: Policy frequency update failed\n", 24356f19efc0SLukasz Majewski __func__); 24366f19efc0SLukasz Majewski break; 24376f19efc0SLukasz Majewski } 24386f19efc0SLukasz Majewski policy->user_policy.max = policy->max; 24396f19efc0SLukasz Majewski __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 24406f19efc0SLukasz Majewski } 24416f19efc0SLukasz Majewski } 24426f19efc0SLukasz Majewski 24436f19efc0SLukasz Majewski return ret; 24446f19efc0SLukasz Majewski } 24456f19efc0SLukasz Majewski 24466f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state) 24476f19efc0SLukasz Majewski { 24486f19efc0SLukasz Majewski unsigned long flags; 24496f19efc0SLukasz Majewski int ret = 0; 24506f19efc0SLukasz Majewski 24516f19efc0SLukasz Majewski if (cpufreq_driver->boost_enabled == state) 24526f19efc0SLukasz Majewski return 0; 24536f19efc0SLukasz Majewski 24546f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24556f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = state; 24566f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24576f19efc0SLukasz Majewski 24586f19efc0SLukasz Majewski ret = cpufreq_driver->set_boost(state); 24596f19efc0SLukasz Majewski if (ret) { 24606f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24616f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = !state; 24626f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24636f19efc0SLukasz Majewski 2464e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST\n", 2465e837f9b5SJoe Perches __func__, state ? "enable" : "disable"); 24666f19efc0SLukasz Majewski } 24676f19efc0SLukasz Majewski 24686f19efc0SLukasz Majewski return ret; 24696f19efc0SLukasz Majewski } 24706f19efc0SLukasz Majewski 24716f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 24726f19efc0SLukasz Majewski { 24736f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 24746f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 24756f19efc0SLukasz Majewski 24766f19efc0SLukasz Majewski return 0; 24776f19efc0SLukasz Majewski } 24786f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 24796f19efc0SLukasz Majewski 24806f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 24816f19efc0SLukasz Majewski { 24826f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 24836f19efc0SLukasz Majewski } 24846f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 24856f19efc0SLukasz Majewski 24866f19efc0SLukasz Majewski /********************************************************************* 24871da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 24881da177e4SLinus Torvalds *********************************************************************/ 24891da177e4SLinus Torvalds 24901da177e4SLinus Torvalds /** 24911da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 24921da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 24931da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 24941da177e4SLinus Torvalds * 24951da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 24961da177e4SLinus Torvalds * returns zero on success, -EBUSY when another driver got here first 24971da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 24981da177e4SLinus Torvalds * 24991da177e4SLinus Torvalds */ 2500221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 25011da177e4SLinus Torvalds { 25021da177e4SLinus Torvalds unsigned long flags; 25031da177e4SLinus Torvalds int ret; 25041da177e4SLinus Torvalds 2505a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2506a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2507a7b422cdSKonrad Rzeszutek Wilk 25081da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 25099c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 25109832235fSRafael J. Wysocki driver_data->target) || 25119832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 25121c03a2d0SViresh Kumar driver_data->target)) || 25131c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 25141da177e4SLinus Torvalds return -EINVAL; 25151da177e4SLinus Torvalds 25162d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 25171da177e4SLinus Torvalds 25180d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25191c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 25200d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25214dea5806SYinghai Lu return -EEXIST; 25221da177e4SLinus Torvalds } 25231c3d85ddSRafael J. Wysocki cpufreq_driver = driver_data; 25240d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25251da177e4SLinus Torvalds 2526bc68b7dfSViresh Kumar if (driver_data->setpolicy) 2527bc68b7dfSViresh Kumar driver_data->flags |= CPUFREQ_CONST_LOOPS; 2528bc68b7dfSViresh Kumar 25296f19efc0SLukasz Majewski if (cpufreq_boost_supported()) { 25306f19efc0SLukasz Majewski /* 25316f19efc0SLukasz Majewski * Check if driver provides function to enable boost - 25326f19efc0SLukasz Majewski * if not, use cpufreq_boost_set_sw as default 25336f19efc0SLukasz Majewski */ 25346f19efc0SLukasz Majewski if (!cpufreq_driver->set_boost) 25356f19efc0SLukasz Majewski cpufreq_driver->set_boost = cpufreq_boost_set_sw; 25366f19efc0SLukasz Majewski 25376f19efc0SLukasz Majewski ret = cpufreq_sysfs_create_file(&boost.attr); 25386f19efc0SLukasz Majewski if (ret) { 25396f19efc0SLukasz Majewski pr_err("%s: cannot register global BOOST sysfs file\n", 25406f19efc0SLukasz Majewski __func__); 25416f19efc0SLukasz Majewski goto err_null_driver; 25426f19efc0SLukasz Majewski } 25436f19efc0SLukasz Majewski } 25446f19efc0SLukasz Majewski 25458a25a2fdSKay Sievers ret = subsys_interface_register(&cpufreq_interface); 25468f5bc2abSJiri Slaby if (ret) 25476f19efc0SLukasz Majewski goto err_boost_unreg; 25481da177e4SLinus Torvalds 2549ce1bcfe9SViresh Kumar if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2550ce1bcfe9SViresh Kumar list_empty(&cpufreq_policy_list)) { 25511da177e4SLinus Torvalds /* if all ->init() calls failed, unregister */ 2552ce1bcfe9SViresh Kumar pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2553e08f5f5bSGautham R Shenoy driver_data->name); 25548a25a2fdSKay Sievers goto err_if_unreg; 25551da177e4SLinus Torvalds } 25561da177e4SLinus Torvalds 255765edc68cSChandra Seetharaman register_hotcpu_notifier(&cpufreq_cpu_notifier); 25582d06d8c4SDominik Brodowski pr_debug("driver %s up and running\n", driver_data->name); 25591da177e4SLinus Torvalds 25608f5bc2abSJiri Slaby return 0; 25618a25a2fdSKay Sievers err_if_unreg: 25628a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25636f19efc0SLukasz Majewski err_boost_unreg: 25646f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25656f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25668f5bc2abSJiri Slaby err_null_driver: 25670d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25681c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25690d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25704d34a67dSDave Jones return ret; 25711da177e4SLinus Torvalds } 25721da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver); 25731da177e4SLinus Torvalds 25741da177e4SLinus Torvalds /** 25751da177e4SLinus Torvalds * cpufreq_unregister_driver - unregister the current CPUFreq driver 25761da177e4SLinus Torvalds * 25771da177e4SLinus Torvalds * Unregister the current CPUFreq driver. Only call this if you have 25781da177e4SLinus Torvalds * the right to do so, i.e. if you have succeeded in initialising before! 25791da177e4SLinus Torvalds * Returns zero if successful, and -EINVAL if the cpufreq_driver is 25801da177e4SLinus Torvalds * currently not initialised. 25811da177e4SLinus Torvalds */ 2582221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver) 25831da177e4SLinus Torvalds { 25841da177e4SLinus Torvalds unsigned long flags; 25851da177e4SLinus Torvalds 25861c3d85ddSRafael J. Wysocki if (!cpufreq_driver || (driver != cpufreq_driver)) 25871da177e4SLinus Torvalds return -EINVAL; 25881da177e4SLinus Torvalds 25892d06d8c4SDominik Brodowski pr_debug("unregistering driver %s\n", driver->name); 25901da177e4SLinus Torvalds 25918a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25926f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25936f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25946f19efc0SLukasz Majewski 259565edc68cSChandra Seetharaman unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 25961da177e4SLinus Torvalds 25976eed9404SViresh Kumar down_write(&cpufreq_rwsem); 25980d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25996eed9404SViresh Kumar 26001c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 26016eed9404SViresh Kumar 26020d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 26036eed9404SViresh Kumar up_write(&cpufreq_rwsem); 26041da177e4SLinus Torvalds 26051da177e4SLinus Torvalds return 0; 26061da177e4SLinus Torvalds } 26071da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 26085a01f2e8SVenkatesh Pallipadi 260990de2a4aSDoug Anderson /* 261090de2a4aSDoug Anderson * Stop cpufreq at shutdown to make sure it isn't holding any locks 261190de2a4aSDoug Anderson * or mutexes when secondary CPUs are halted. 261290de2a4aSDoug Anderson */ 261390de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 261490de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 261590de2a4aSDoug Anderson }; 261690de2a4aSDoug Anderson 26175a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 26185a01f2e8SVenkatesh Pallipadi { 2619a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2620a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2621a7b422cdSKonrad Rzeszutek Wilk 26222361be23SViresh Kumar cpufreq_global_kobject = kobject_create(); 26238aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 26248aa84ad8SThomas Renninger 262590de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 262690de2a4aSDoug Anderson 26275a01f2e8SVenkatesh Pallipadi return 0; 26285a01f2e8SVenkatesh Pallipadi } 26295a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2630