11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds * linux/drivers/cpufreq/cpufreq.c
31da177e4SLinus Torvalds *
41da177e4SLinus Torvalds * Copyright (C) 2001 Russell King
51da177e4SLinus Torvalds * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6bb176f7dSViresh Kumar * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
71da177e4SLinus Torvalds *
8c32b6b8eSAshok Raj * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9c32b6b8eSAshok Raj * Added handling for CPU hotplug
108ff69732SDave Jones * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
118ff69732SDave Jones * Fix handling for CPU hotplug -- affected CPUs
12c32b6b8eSAshok Raj *
131da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify
141da177e4SLinus Torvalds * it under the terms of the GNU General Public License version 2 as
151da177e4SLinus Torvalds * published by the Free Software Foundation.
161da177e4SLinus Torvalds */
171da177e4SLinus Torvalds
18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19db701151SViresh Kumar
205ff0a268SViresh Kumar #include <linux/cpu.h>
211da177e4SLinus Torvalds #include <linux/cpufreq.h>
221da177e4SLinus Torvalds #include <linux/delay.h>
231da177e4SLinus Torvalds #include <linux/device.h>
245ff0a268SViresh Kumar #include <linux/init.h>
255ff0a268SViresh Kumar #include <linux/kernel_stat.h>
265ff0a268SViresh Kumar #include <linux/module.h>
273fc54d37Sakpm@osdl.org #include <linux/mutex.h>
285ff0a268SViresh Kumar #include <linux/slab.h>
292f0aea93SViresh Kumar #include <linux/suspend.h>
3090de2a4aSDoug Anderson #include <linux/syscore_ops.h>
315ff0a268SViresh Kumar #include <linux/tick.h>
326f4f2723SThomas Renninger #include <trace/events/power.h>
336f4f2723SThomas Renninger
34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list);
35f963735aSViresh Kumar
36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37f963735aSViresh Kumar {
38f963735aSViresh Kumar return cpumask_empty(policy->cpus);
39f963735aSViresh Kumar }
40f963735aSViresh Kumar
41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42f963735aSViresh Kumar {
43f963735aSViresh Kumar return active == !policy_is_inactive(policy);
44f963735aSViresh Kumar }
45f963735aSViresh Kumar
46f963735aSViresh Kumar /* Finds Next Active/Inactive policy */
47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48f963735aSViresh Kumar bool active)
49f963735aSViresh Kumar {
50f963735aSViresh Kumar do {
51f963735aSViresh Kumar policy = list_next_entry(policy, policy_list);
52f963735aSViresh Kumar
53f963735aSViresh Kumar /* No more policies in the list */
54f963735aSViresh Kumar if (&policy->policy_list == &cpufreq_policy_list)
55f963735aSViresh Kumar return NULL;
56f963735aSViresh Kumar } while (!suitable_policy(policy, active));
57f963735aSViresh Kumar
58f963735aSViresh Kumar return policy;
59f963735aSViresh Kumar }
60f963735aSViresh Kumar
61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active)
62f963735aSViresh Kumar {
63f963735aSViresh Kumar struct cpufreq_policy *policy;
64f963735aSViresh Kumar
65f963735aSViresh Kumar /* No policies in the list */
66f963735aSViresh Kumar if (list_empty(&cpufreq_policy_list))
67f963735aSViresh Kumar return NULL;
68f963735aSViresh Kumar
69f963735aSViresh Kumar policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70f963735aSViresh Kumar policy_list);
71f963735aSViresh Kumar
72f963735aSViresh Kumar if
(!suitable_policy(policy, active)) 73f963735aSViresh Kumar policy = next_policy(policy, active); 74f963735aSViresh Kumar 75f963735aSViresh Kumar return policy; 76f963735aSViresh Kumar } 77f963735aSViresh Kumar 78f963735aSViresh Kumar /* Macros to iterate over CPU policies */ 79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active) \ 80f963735aSViresh Kumar for (__policy = first_policy(__active); \ 81f963735aSViresh Kumar __policy; \ 82f963735aSViresh Kumar __policy = next_policy(__policy, __active)) 83f963735aSViresh Kumar 84f963735aSViresh Kumar #define for_each_active_policy(__policy) \ 85f963735aSViresh Kumar for_each_suitable_policy(__policy, true) 86f963735aSViresh Kumar #define for_each_inactive_policy(__policy) \ 87f963735aSViresh Kumar for_each_suitable_policy(__policy, false) 88f963735aSViresh Kumar 89b4f0676fSViresh Kumar #define for_each_policy(__policy) \ 90b4f0676fSViresh Kumar list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) 91b4f0676fSViresh Kumar 92f7b27061SViresh Kumar /* Iterate over governors */ 93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list); 94f7b27061SViresh Kumar #define for_each_governor(__governor) \ 95f7b27061SViresh Kumar list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) 96f7b27061SViresh Kumar 971da177e4SLinus Torvalds /** 98cd878479SDave Jones * The "cpufreq driver" - the arch- or hardware-dependent low 991da177e4SLinus Torvalds * level driver of CPUFreq support, and its spinlock. This lock 1001da177e4SLinus Torvalds * also protects the cpufreq_cpu_data array. 1011da177e4SLinus Torvalds */ 1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver; 1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock); 1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock); 106bb176f7dSViresh Kumar 107084f3493SThomas Renninger /* This one keeps track of the previously set governor of a removed CPU */ 108e77b89f1SDmitry Monakhov static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); 1091da177e4SLinus Torvalds 1102f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */ 1112f0aea93SViresh Kumar static bool cpufreq_suspended; 1121da177e4SLinus Torvalds 1139c0ebcf7SViresh Kumar static inline bool has_target(void) 1149c0ebcf7SViresh Kumar { 1159c0ebcf7SViresh Kumar return cpufreq_driver->target_index || cpufreq_driver->target; 1169c0ebcf7SViresh Kumar } 1179c0ebcf7SViresh Kumar 1185a01f2e8SVenkatesh Pallipadi /* 1196eed9404SViresh Kumar * rwsem to guarantee that cpufreq driver module doesn't unload during critical 1206eed9404SViresh Kumar * sections 1216eed9404SViresh Kumar */ 1226eed9404SViresh Kumar static DECLARE_RWSEM(cpufreq_rwsem); 1236eed9404SViresh Kumar 1241da177e4SLinus Torvalds /* internal prototypes */ 12529464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy, 12629464f28SDave Jones unsigned int event); 127d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 12865f27f38SDavid Howells static void handle_update(struct work_struct *work); 1291da177e4SLinus Torvalds 1301da177e4SLinus Torvalds /** 1311da177e4SLinus Torvalds * Two notifier lists: the "policy" list is involved in the 1321da177e4SLinus Torvalds * validation process for a new CPU frequency policy; the 1331da177e4SLinus Torvalds * "transition" list for kernel code that needs to handle 1341da177e4SLinus Torvalds * changes to devices when the 
CPU clock speed changes. 1351da177e4SLinus Torvalds * The mutex locks both lists. 1361da177e4SLinus Torvalds */ 137e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); 138b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list; 1391da177e4SLinus Torvalds 14074212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called; 141b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void) 142b4dfdbb3SAlan Stern { 143b4dfdbb3SAlan Stern srcu_init_notifier_head(&cpufreq_transition_notifier_list); 14474212ca4SCesar Eduardo Barros init_cpufreq_transition_notifier_list_called = true; 145b4dfdbb3SAlan Stern return 0; 146b4dfdbb3SAlan Stern } 147b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list); 1481da177e4SLinus Torvalds 149a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly; 150da584455SViresh Kumar static int cpufreq_disabled(void) 151a7b422cdSKonrad Rzeszutek Wilk { 152a7b422cdSKonrad Rzeszutek Wilk return off; 153a7b422cdSKonrad Rzeszutek Wilk } 154a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void) 155a7b422cdSKonrad Rzeszutek Wilk { 156a7b422cdSKonrad Rzeszutek Wilk off = 1; 157a7b422cdSKonrad Rzeszutek Wilk } 1583fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex); 1591da177e4SLinus Torvalds 1604d5dcc42SViresh Kumar bool have_governor_per_policy(void) 1614d5dcc42SViresh Kumar { 1620b981e70SViresh Kumar return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); 1634d5dcc42SViresh Kumar } 1643f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy); 1654d5dcc42SViresh Kumar 166944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) 167944e9a03SViresh Kumar { 168944e9a03SViresh Kumar if (have_governor_per_policy()) 169944e9a03SViresh Kumar return &policy->kobj; 170944e9a03SViresh Kumar else 171944e9a03SViresh Kumar return cpufreq_global_kobject; 172944e9a03SViresh Kumar } 173944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 174944e9a03SViresh Kumar 17572a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 17672a4ce34SViresh Kumar { 17772a4ce34SViresh Kumar u64 idle_time; 17872a4ce34SViresh Kumar u64 cur_wall_time; 17972a4ce34SViresh Kumar u64 busy_time; 18072a4ce34SViresh Kumar 18172a4ce34SViresh Kumar cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); 18272a4ce34SViresh Kumar 18372a4ce34SViresh Kumar busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; 18472a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; 18572a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; 18672a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; 18772a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; 18872a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; 18972a4ce34SViresh Kumar 19072a4ce34SViresh Kumar idle_time = cur_wall_time - busy_time; 19172a4ce34SViresh Kumar if (wall) 19272a4ce34SViresh Kumar *wall = cputime_to_usecs(cur_wall_time); 19372a4ce34SViresh Kumar 19472a4ce34SViresh Kumar return cputime_to_usecs(idle_time); 19572a4ce34SViresh Kumar } 19672a4ce34SViresh Kumar 19772a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) 19872a4ce34SViresh Kumar { 19972a4ce34SViresh Kumar u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? 
wall : NULL);
20072a4ce34SViresh Kumar
20172a4ce34SViresh Kumar if (idle_time == -1ULL)
20272a4ce34SViresh Kumar return get_cpu_idle_time_jiffy(cpu, wall);
20372a4ce34SViresh Kumar else if (!io_busy)
20472a4ce34SViresh Kumar idle_time += get_cpu_iowait_time_us(cpu, wall);
20572a4ce34SViresh Kumar
20672a4ce34SViresh Kumar return idle_time;
20772a4ce34SViresh Kumar }
20872a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time);
20972a4ce34SViresh Kumar
21070e9e778SViresh Kumar /*
21170e9e778SViresh Kumar * This is a generic cpufreq init() routine which can be used by cpufreq
21270e9e778SViresh Kumar * drivers of SMP systems. It will do the following:
21370e9e778SViresh Kumar * - validate & show freq table passed
21470e9e778SViresh Kumar * - set policy's transition latency
21570e9e778SViresh Kumar * - policy->cpus with all possible CPUs
21670e9e778SViresh Kumar */
21770e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy,
21870e9e778SViresh Kumar struct cpufreq_frequency_table *table,
21970e9e778SViresh Kumar unsigned int transition_latency)
22070e9e778SViresh Kumar {
22170e9e778SViresh Kumar int ret;
22270e9e778SViresh Kumar
22370e9e778SViresh Kumar ret = cpufreq_table_validate_and_show(policy, table);
22470e9e778SViresh Kumar if (ret) {
22570e9e778SViresh Kumar pr_err("%s: invalid frequency table: %d\n", __func__, ret);
22670e9e778SViresh Kumar return ret;
22770e9e778SViresh Kumar }
22870e9e778SViresh Kumar
22970e9e778SViresh Kumar policy->cpuinfo.transition_latency = transition_latency;
23070e9e778SViresh Kumar
23170e9e778SViresh Kumar /*
23270e9e778SViresh Kumar * The driver only supports the SMP configuration where all processors
23370e9e778SViresh Kumar * share the clock and voltage.
23470e9e778SViresh Kumar */
23570e9e778SViresh Kumar cpumask_setall(policy->cpus);
23670e9e778SViresh Kumar
23770e9e778SViresh Kumar return 0;
23870e9e778SViresh Kumar }
23970e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init);
24070e9e778SViresh Kumar
241988bed09SViresh Kumar /* Only for cpufreq core internal use */
242988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
243652ed95dSViresh Kumar {
244652ed95dSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245652ed95dSViresh Kumar
246988bed09SViresh Kumar return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
247988bed09SViresh Kumar }
248988bed09SViresh Kumar
249988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu)
250988bed09SViresh Kumar {
251988bed09SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
252988bed09SViresh Kumar
253652ed95dSViresh Kumar if (!policy || IS_ERR(policy->clk)) {
254e837f9b5SJoe Perches pr_err("%s: No %s associated to cpu: %d\n",
255e837f9b5SJoe Perches __func__, policy ? "clk" : "policy", cpu);
256652ed95dSViresh Kumar return 0;
257652ed95dSViresh Kumar }
258652ed95dSViresh Kumar
259652ed95dSViresh Kumar return clk_get_rate(policy->clk) / 1000;
260652ed95dSViresh Kumar }
261652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get);
262652ed95dSViresh Kumar
26350e9c852SViresh Kumar /**
26450e9c852SViresh Kumar * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
26550e9c852SViresh Kumar *
26650e9c852SViresh Kumar * @cpu: cpu to find policy for.
26750e9c852SViresh Kumar *
26850e9c852SViresh Kumar * This returns the policy for 'cpu', or NULL if it doesn't exist.
26950e9c852SViresh Kumar * It also increments the kobject reference count to mark it busy and so would 27050e9c852SViresh Kumar * require a corresponding call to cpufreq_cpu_put() to decrement it back. 27150e9c852SViresh Kumar * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be 27250e9c852SViresh Kumar * freed as that depends on the kobj count. 27350e9c852SViresh Kumar * 27450e9c852SViresh Kumar * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a 27550e9c852SViresh Kumar * valid policy is found. This is done to make sure the driver doesn't get 27650e9c852SViresh Kumar * unregistered while the policy is being used. 27750e9c852SViresh Kumar * 27850e9c852SViresh Kumar * Return: A valid policy on success, otherwise NULL on failure. 27950e9c852SViresh Kumar */ 2806eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 2811da177e4SLinus Torvalds { 2826eed9404SViresh Kumar struct cpufreq_policy *policy = NULL; 2831da177e4SLinus Torvalds unsigned long flags; 2841da177e4SLinus Torvalds 2851b947c90SViresh Kumar if (WARN_ON(cpu >= nr_cpu_ids)) 2866eed9404SViresh Kumar return NULL; 2876eed9404SViresh Kumar 2886eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 2896eed9404SViresh Kumar return NULL; 2901da177e4SLinus Torvalds 2911da177e4SLinus Torvalds /* get the cpufreq driver */ 2920d1857a1SNathan Zimmer read_lock_irqsave(&cpufreq_driver_lock, flags); 2931da177e4SLinus Torvalds 2946eed9404SViresh Kumar if (cpufreq_driver) { 2951da177e4SLinus Torvalds /* get the CPU */ 296988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 2976eed9404SViresh Kumar if (policy) 2986eed9404SViresh Kumar kobject_get(&policy->kobj); 2996eed9404SViresh Kumar } 3006eed9404SViresh Kumar 3016eed9404SViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 3021da177e4SLinus Torvalds 3033a3e9e06SViresh Kumar if (!policy) 3046eed9404SViresh Kumar up_read(&cpufreq_rwsem); 3051da177e4SLinus Torvalds 3063a3e9e06SViresh Kumar return policy; 307a9144436SStephen Boyd } 3081da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 3091da177e4SLinus Torvalds 31050e9c852SViresh Kumar /** 31150e9c852SViresh Kumar * cpufreq_cpu_put: Decrements the usage count of a policy 31250e9c852SViresh Kumar * 31350e9c852SViresh Kumar * @policy: policy earlier returned by cpufreq_cpu_get(). 31450e9c852SViresh Kumar * 31550e9c852SViresh Kumar * This decrements the kobject reference count incremented earlier by calling 31650e9c852SViresh Kumar * cpufreq_cpu_get(). 31750e9c852SViresh Kumar * 31850e9c852SViresh Kumar * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get(). 
31950e9c852SViresh Kumar */ 3203a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy) 321a9144436SStephen Boyd { 3226eed9404SViresh Kumar kobject_put(&policy->kobj); 3236eed9404SViresh Kumar up_read(&cpufreq_rwsem); 324a9144436SStephen Boyd } 3251da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 3261da177e4SLinus Torvalds 3271da177e4SLinus Torvalds /********************************************************************* 3281da177e4SLinus Torvalds * EXTERNALLY AFFECTING FREQUENCY CHANGES * 3291da177e4SLinus Torvalds *********************************************************************/ 3301da177e4SLinus Torvalds 3311da177e4SLinus Torvalds /** 3321da177e4SLinus Torvalds * adjust_jiffies - adjust the system "loops_per_jiffy" 3331da177e4SLinus Torvalds * 3341da177e4SLinus Torvalds * This function alters the system "loops_per_jiffy" for the clock 3351da177e4SLinus Torvalds * speed change. Note that loops_per_jiffy cannot be updated on SMP 3361da177e4SLinus Torvalds * systems as each CPU might be scaled differently. So, use the arch 3371da177e4SLinus Torvalds * per-CPU loops_per_jiffy value wherever possible. 3381da177e4SLinus Torvalds */ 33939c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 34039c132eeSViresh Kumar { 3411da177e4SLinus Torvalds #ifndef CONFIG_SMP 3421da177e4SLinus Torvalds static unsigned long l_p_j_ref; 3431da177e4SLinus Torvalds static unsigned int l_p_j_ref_freq; 3441da177e4SLinus Torvalds 3451da177e4SLinus Torvalds if (ci->flags & CPUFREQ_CONST_LOOPS) 3461da177e4SLinus Torvalds return; 3471da177e4SLinus Torvalds 3481da177e4SLinus Torvalds if (!l_p_j_ref_freq) { 3491da177e4SLinus Torvalds l_p_j_ref = loops_per_jiffy; 3501da177e4SLinus Torvalds l_p_j_ref_freq = ci->old; 351e837f9b5SJoe Perches pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", 352e837f9b5SJoe Perches l_p_j_ref, l_p_j_ref_freq); 3531da177e4SLinus Torvalds } 3540b443eadSViresh Kumar if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { 355e08f5f5bSGautham R Shenoy loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, 356e08f5f5bSGautham R Shenoy ci->new); 357e837f9b5SJoe Perches pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", 358e837f9b5SJoe Perches loops_per_jiffy, ci->new); 3591da177e4SLinus Torvalds } 3601da177e4SLinus Torvalds #endif 36139c132eeSViresh Kumar } 3621da177e4SLinus Torvalds 3630956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy, 364b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 3651da177e4SLinus Torvalds { 3661da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 3671da177e4SLinus Torvalds 368d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 369d5aaffa9SDirk Brandewie return; 370d5aaffa9SDirk Brandewie 3711c3d85ddSRafael J. Wysocki freqs->flags = cpufreq_driver->flags; 3722d06d8c4SDominik Brodowski pr_debug("notification %u of frequency transition to %u kHz\n", 373e4472cb3SDave Jones state, freqs->new); 3741da177e4SLinus Torvalds 3751da177e4SLinus Torvalds switch (state) { 376e4472cb3SDave Jones 3771da177e4SLinus Torvalds case CPUFREQ_PRECHANGE: 378e4472cb3SDave Jones /* detect if the driver reported a value as "old frequency" 379e4472cb3SDave Jones * which is not equal to what the cpufreq core thinks is 380e4472cb3SDave Jones * "old frequency". 3811da177e4SLinus Torvalds */ 3821c3d85ddSRafael J. 
Wysocki if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 383e4472cb3SDave Jones if ((policy) && (policy->cpu == freqs->cpu) && 384e4472cb3SDave Jones (policy->cur) && (policy->cur != freqs->old)) { 385e837f9b5SJoe Perches pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", 386e4472cb3SDave Jones freqs->old, policy->cur); 387e4472cb3SDave Jones freqs->old = policy->cur; 3881da177e4SLinus Torvalds } 3891da177e4SLinus Torvalds } 390b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 391e4472cb3SDave Jones CPUFREQ_PRECHANGE, freqs); 3921da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 3931da177e4SLinus Torvalds break; 394e4472cb3SDave Jones 3951da177e4SLinus Torvalds case CPUFREQ_POSTCHANGE: 3961da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 397e837f9b5SJoe Perches pr_debug("FREQ: %lu - CPU: %lu\n", 398e837f9b5SJoe Perches (unsigned long)freqs->new, (unsigned long)freqs->cpu); 39925e41933SThomas Renninger trace_cpu_frequency(freqs->new, freqs->cpu); 400b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 401e4472cb3SDave Jones CPUFREQ_POSTCHANGE, freqs); 402e4472cb3SDave Jones if (likely(policy) && likely(policy->cpu == freqs->cpu)) 403e4472cb3SDave Jones policy->cur = freqs->new; 4041da177e4SLinus Torvalds break; 4051da177e4SLinus Torvalds } 4061da177e4SLinus Torvalds } 407bb176f7dSViresh Kumar 408b43a7ffbSViresh Kumar /** 409b43a7ffbSViresh Kumar * cpufreq_notify_transition - call notifier chain and adjust_jiffies 410b43a7ffbSViresh Kumar * on frequency transition. 411b43a7ffbSViresh Kumar * 412b43a7ffbSViresh Kumar * This function calls the transition notifiers and the "adjust_jiffies" 413b43a7ffbSViresh Kumar * function. It is called twice on all CPU frequency changes that have 414b43a7ffbSViresh Kumar * external effects. 415b43a7ffbSViresh Kumar */ 416236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy, 417b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 418b43a7ffbSViresh Kumar { 419b43a7ffbSViresh Kumar for_each_cpu(freqs->cpu, policy->cpus) 420b43a7ffbSViresh Kumar __cpufreq_notify_transition(policy, freqs, state); 421b43a7ffbSViresh Kumar } 4221da177e4SLinus Torvalds 423f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */ 424236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, 425f7ba3b41SViresh Kumar struct cpufreq_freqs *freqs, int transition_failed) 426f7ba3b41SViresh Kumar { 427f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 428f7ba3b41SViresh Kumar if (!transition_failed) 429f7ba3b41SViresh Kumar return; 430f7ba3b41SViresh Kumar 431f7ba3b41SViresh Kumar swap(freqs->old, freqs->new); 432f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 433f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 434f7ba3b41SViresh Kumar } 435f7ba3b41SViresh Kumar 43612478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, 43712478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs) 43812478cf0SSrivatsa S. Bhat { 439ca654dc3SSrivatsa S. Bhat 440ca654dc3SSrivatsa S. Bhat /* 441ca654dc3SSrivatsa S. Bhat * Catch double invocations of _begin() which lead to self-deadlock. 442ca654dc3SSrivatsa S. Bhat * ASYNC_NOTIFICATION drivers are left out because the cpufreq core 443ca654dc3SSrivatsa S. 
Bhat * doesn't invoke _begin() on their behalf, and hence the chances of 444ca654dc3SSrivatsa S. Bhat * double invocations are very low. Moreover, there are scenarios 445ca654dc3SSrivatsa S. Bhat * where these checks can emit false-positive warnings in these 446ca654dc3SSrivatsa S. Bhat * drivers; so we avoid that by skipping them altogether. 447ca654dc3SSrivatsa S. Bhat */ 448ca654dc3SSrivatsa S. Bhat WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) 449ca654dc3SSrivatsa S. Bhat && current == policy->transition_task); 450ca654dc3SSrivatsa S. Bhat 45112478cf0SSrivatsa S. Bhat wait: 45212478cf0SSrivatsa S. Bhat wait_event(policy->transition_wait, !policy->transition_ongoing); 45312478cf0SSrivatsa S. Bhat 45412478cf0SSrivatsa S. Bhat spin_lock(&policy->transition_lock); 45512478cf0SSrivatsa S. Bhat 45612478cf0SSrivatsa S. Bhat if (unlikely(policy->transition_ongoing)) { 45712478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 45812478cf0SSrivatsa S. Bhat goto wait; 45912478cf0SSrivatsa S. Bhat } 46012478cf0SSrivatsa S. Bhat 46112478cf0SSrivatsa S. Bhat policy->transition_ongoing = true; 462ca654dc3SSrivatsa S. Bhat policy->transition_task = current; 46312478cf0SSrivatsa S. Bhat 46412478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 46512478cf0SSrivatsa S. Bhat 46612478cf0SSrivatsa S. Bhat cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 46712478cf0SSrivatsa S. Bhat } 46812478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); 46912478cf0SSrivatsa S. Bhat 47012478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy, 47112478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs, int transition_failed) 47212478cf0SSrivatsa S. Bhat { 47312478cf0SSrivatsa S. Bhat if (unlikely(WARN_ON(!policy->transition_ongoing))) 47412478cf0SSrivatsa S. Bhat return; 47512478cf0SSrivatsa S. Bhat 47612478cf0SSrivatsa S. Bhat cpufreq_notify_post_transition(policy, freqs, transition_failed); 47712478cf0SSrivatsa S. Bhat 47812478cf0SSrivatsa S. Bhat policy->transition_ongoing = false; 479ca654dc3SSrivatsa S. Bhat policy->transition_task = NULL; 48012478cf0SSrivatsa S. Bhat 48112478cf0SSrivatsa S. Bhat wake_up(&policy->transition_wait); 48212478cf0SSrivatsa S. Bhat } 48312478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); 48412478cf0SSrivatsa S. 
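/*
 * Illustrative sketch, not part of cpufreq.c: how a driver that performs its
 * own transition notification (for example one that sets
 * CPUFREQ_ASYNC_NOTIFICATION, so the core does not call these helpers on its
 * behalf) might bracket a frequency change with
 * cpufreq_freq_transition_begin()/_end(). The my_drv_target_index(),
 * my_hw_get_freq(), my_hw_set_freq() names and my_freq_table are hypothetical
 * placeholders, not existing kernel symbols.
 */
#if 0	/* example only, never compiled */
static int my_drv_target_index(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = my_hw_get_freq(policy->cpu);
	freqs.new = my_freq_table[index].frequency;

	/* Waits out any transition in flight, then sends CPUFREQ_PRECHANGE */
	cpufreq_freq_transition_begin(policy, &freqs);

	ret = my_hw_set_freq(policy->cpu, freqs.new);

	/* Sends CPUFREQ_POSTCHANGE; reverses the notification on failure */
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}
#endif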
Bhat 4851da177e4SLinus Torvalds 4861da177e4SLinus Torvalds /********************************************************************* 4871da177e4SLinus Torvalds * SYSFS INTERFACE * 4881da177e4SLinus Torvalds *********************************************************************/ 4898a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj, 4906f19efc0SLukasz Majewski struct attribute *attr, char *buf) 4916f19efc0SLukasz Majewski { 4926f19efc0SLukasz Majewski return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); 4936f19efc0SLukasz Majewski } 4946f19efc0SLukasz Majewski 4956f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, 4966f19efc0SLukasz Majewski const char *buf, size_t count) 4976f19efc0SLukasz Majewski { 4986f19efc0SLukasz Majewski int ret, enable; 4996f19efc0SLukasz Majewski 5006f19efc0SLukasz Majewski ret = sscanf(buf, "%d", &enable); 5016f19efc0SLukasz Majewski if (ret != 1 || enable < 0 || enable > 1) 5026f19efc0SLukasz Majewski return -EINVAL; 5036f19efc0SLukasz Majewski 5046f19efc0SLukasz Majewski if (cpufreq_boost_trigger_state(enable)) { 505e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST!\n", 506e837f9b5SJoe Perches __func__, enable ? "enable" : "disable"); 5076f19efc0SLukasz Majewski return -EINVAL; 5086f19efc0SLukasz Majewski } 5096f19efc0SLukasz Majewski 510e837f9b5SJoe Perches pr_debug("%s: cpufreq BOOST %s\n", 511e837f9b5SJoe Perches __func__, enable ? "enabled" : "disabled"); 5126f19efc0SLukasz Majewski 5136f19efc0SLukasz Majewski return count; 5146f19efc0SLukasz Majewski } 5156f19efc0SLukasz Majewski define_one_global_rw(boost); 5161da177e4SLinus Torvalds 51742f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor) 5183bcb09a3SJeremy Fitzhardinge { 5193bcb09a3SJeremy Fitzhardinge struct cpufreq_governor *t; 5203bcb09a3SJeremy Fitzhardinge 521f7b27061SViresh Kumar for_each_governor(t) 5227c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 5233bcb09a3SJeremy Fitzhardinge return t; 5243bcb09a3SJeremy Fitzhardinge 5253bcb09a3SJeremy Fitzhardinge return NULL; 5263bcb09a3SJeremy Fitzhardinge } 5273bcb09a3SJeremy Fitzhardinge 5281da177e4SLinus Torvalds /** 5291da177e4SLinus Torvalds * cpufreq_parse_governor - parse a governor string 5301da177e4SLinus Torvalds */ 5311da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, 5321da177e4SLinus Torvalds struct cpufreq_governor **governor) 5331da177e4SLinus Torvalds { 5343bcb09a3SJeremy Fitzhardinge int err = -EINVAL; 5353bcb09a3SJeremy Fitzhardinge 5361c3d85ddSRafael J. Wysocki if (!cpufreq_driver) 5373bcb09a3SJeremy Fitzhardinge goto out; 5383bcb09a3SJeremy Fitzhardinge 5391c3d85ddSRafael J. 
Wysocki if (cpufreq_driver->setpolicy) { 5407c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 5411da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_PERFORMANCE; 5423bcb09a3SJeremy Fitzhardinge err = 0; 5437c4f4539SRasmus Villemoes } else if (!strncasecmp(str_governor, "powersave", 544e08f5f5bSGautham R Shenoy CPUFREQ_NAME_LEN)) { 5451da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_POWERSAVE; 5463bcb09a3SJeremy Fitzhardinge err = 0; 5471da177e4SLinus Torvalds } 5482e1cc3a5SViresh Kumar } else { 5491da177e4SLinus Torvalds struct cpufreq_governor *t; 5503bcb09a3SJeremy Fitzhardinge 5513fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 5523bcb09a3SJeremy Fitzhardinge 55342f91fa1SViresh Kumar t = find_governor(str_governor); 5543bcb09a3SJeremy Fitzhardinge 555ea714970SJeremy Fitzhardinge if (t == NULL) { 556ea714970SJeremy Fitzhardinge int ret; 557ea714970SJeremy Fitzhardinge 558ea714970SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5591a8e1463SKees Cook ret = request_module("cpufreq_%s", str_governor); 560ea714970SJeremy Fitzhardinge mutex_lock(&cpufreq_governor_mutex); 561ea714970SJeremy Fitzhardinge 562ea714970SJeremy Fitzhardinge if (ret == 0) 56342f91fa1SViresh Kumar t = find_governor(str_governor); 564ea714970SJeremy Fitzhardinge } 565ea714970SJeremy Fitzhardinge 5663bcb09a3SJeremy Fitzhardinge if (t != NULL) { 5671da177e4SLinus Torvalds *governor = t; 5683bcb09a3SJeremy Fitzhardinge err = 0; 5691da177e4SLinus Torvalds } 5703bcb09a3SJeremy Fitzhardinge 5713bcb09a3SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5721da177e4SLinus Torvalds } 5731da177e4SLinus Torvalds out: 5743bcb09a3SJeremy Fitzhardinge return err; 5751da177e4SLinus Torvalds } 5761da177e4SLinus Torvalds 5771da177e4SLinus Torvalds /** 578e08f5f5bSGautham R Shenoy * cpufreq_per_cpu_attr_read() / show_##file_name() - 579e08f5f5bSGautham R Shenoy * print out cpufreq information 5801da177e4SLinus Torvalds * 5811da177e4SLinus Torvalds * Write out information from cpufreq_driver->policy[cpu]; object must be 5821da177e4SLinus Torvalds * "unsigned int". 
5831da177e4SLinus Torvalds */ 5841da177e4SLinus Torvalds 5851da177e4SLinus Torvalds #define show_one(file_name, object) \ 5861da177e4SLinus Torvalds static ssize_t show_##file_name \ 5871da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf) \ 5881da177e4SLinus Torvalds { \ 5891da177e4SLinus Torvalds return sprintf(buf, "%u\n", policy->object); \ 5901da177e4SLinus Torvalds } 5911da177e4SLinus Torvalds 5921da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq); 5931da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq); 594ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 5951da177e4SLinus Torvalds show_one(scaling_min_freq, min); 5961da177e4SLinus Torvalds show_one(scaling_max_freq, max); 597c034b02eSDirk Brandewie 59809347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) 599c034b02eSDirk Brandewie { 600c034b02eSDirk Brandewie ssize_t ret; 601c034b02eSDirk Brandewie 602c034b02eSDirk Brandewie if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 603c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); 604c034b02eSDirk Brandewie else 605c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", policy->cur); 606c034b02eSDirk Brandewie return ret; 607c034b02eSDirk Brandewie } 6081da177e4SLinus Torvalds 609037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 6103a3e9e06SViresh Kumar struct cpufreq_policy *new_policy); 6117970e08bSThomas Renninger 6121da177e4SLinus Torvalds /** 6131da177e4SLinus Torvalds * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 6141da177e4SLinus Torvalds */ 6151da177e4SLinus Torvalds #define store_one(file_name, object) \ 6161da177e4SLinus Torvalds static ssize_t store_##file_name \ 6171da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count) \ 6181da177e4SLinus Torvalds { \ 619619c144cSVince Hsu int ret, temp; \ 6201da177e4SLinus Torvalds struct cpufreq_policy new_policy; \ 6211da177e4SLinus Torvalds \ 6221da177e4SLinus Torvalds ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 6231da177e4SLinus Torvalds if (ret) \ 6241da177e4SLinus Torvalds return -EINVAL; \ 6251da177e4SLinus Torvalds \ 6261da177e4SLinus Torvalds ret = sscanf(buf, "%u", &new_policy.object); \ 6271da177e4SLinus Torvalds if (ret != 1) \ 6281da177e4SLinus Torvalds return -EINVAL; \ 6291da177e4SLinus Torvalds \ 630619c144cSVince Hsu temp = new_policy.object; \ 631037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); \ 632619c144cSVince Hsu if (!ret) \ 633619c144cSVince Hsu policy->user_policy.object = temp; \ 6341da177e4SLinus Torvalds \ 6351da177e4SLinus Torvalds return ret ? 
ret : count; \ 6361da177e4SLinus Torvalds } 6371da177e4SLinus Torvalds 6381da177e4SLinus Torvalds store_one(scaling_min_freq, min); 6391da177e4SLinus Torvalds store_one(scaling_max_freq, max); 6401da177e4SLinus Torvalds 6411da177e4SLinus Torvalds /** 6421da177e4SLinus Torvalds * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 6431da177e4SLinus Torvalds */ 644e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 645e08f5f5bSGautham R Shenoy char *buf) 6461da177e4SLinus Torvalds { 647d92d50a4SViresh Kumar unsigned int cur_freq = __cpufreq_get(policy); 6481da177e4SLinus Torvalds if (!cur_freq) 6491da177e4SLinus Torvalds return sprintf(buf, "<unknown>"); 6501da177e4SLinus Torvalds return sprintf(buf, "%u\n", cur_freq); 6511da177e4SLinus Torvalds } 6521da177e4SLinus Torvalds 6531da177e4SLinus Torvalds /** 6541da177e4SLinus Torvalds * show_scaling_governor - show the current policy for the specified CPU 6551da177e4SLinus Torvalds */ 656905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) 6571da177e4SLinus Torvalds { 6581da177e4SLinus Torvalds if (policy->policy == CPUFREQ_POLICY_POWERSAVE) 6591da177e4SLinus Torvalds return sprintf(buf, "powersave\n"); 6601da177e4SLinus Torvalds else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) 6611da177e4SLinus Torvalds return sprintf(buf, "performance\n"); 6621da177e4SLinus Torvalds else if (policy->governor) 6634b972f0bSviresh kumar return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", 66429464f28SDave Jones policy->governor->name); 6651da177e4SLinus Torvalds return -EINVAL; 6661da177e4SLinus Torvalds } 6671da177e4SLinus Torvalds 6681da177e4SLinus Torvalds /** 6691da177e4SLinus Torvalds * store_scaling_governor - store policy for the specified CPU 6701da177e4SLinus Torvalds */ 6711da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 6721da177e4SLinus Torvalds const char *buf, size_t count) 6731da177e4SLinus Torvalds { 6745136fa56SSrivatsa S. 
Bhat int ret; 6751da177e4SLinus Torvalds char str_governor[16]; 6761da177e4SLinus Torvalds struct cpufreq_policy new_policy; 6771da177e4SLinus Torvalds 6781da177e4SLinus Torvalds ret = cpufreq_get_policy(&new_policy, policy->cpu); 6791da177e4SLinus Torvalds if (ret) 6801da177e4SLinus Torvalds return ret; 6811da177e4SLinus Torvalds 6821da177e4SLinus Torvalds ret = sscanf(buf, "%15s", str_governor); 6831da177e4SLinus Torvalds if (ret != 1) 6841da177e4SLinus Torvalds return -EINVAL; 6851da177e4SLinus Torvalds 686e08f5f5bSGautham R Shenoy if (cpufreq_parse_governor(str_governor, &new_policy.policy, 687e08f5f5bSGautham R Shenoy &new_policy.governor)) 6881da177e4SLinus Torvalds return -EINVAL; 6891da177e4SLinus Torvalds 690037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 6917970e08bSThomas Renninger 6927970e08bSThomas Renninger policy->user_policy.policy = policy->policy; 6937970e08bSThomas Renninger policy->user_policy.governor = policy->governor; 6947970e08bSThomas Renninger 695e08f5f5bSGautham R Shenoy if (ret) 696e08f5f5bSGautham R Shenoy return ret; 697e08f5f5bSGautham R Shenoy else 698e08f5f5bSGautham R Shenoy return count; 6991da177e4SLinus Torvalds } 7001da177e4SLinus Torvalds 7011da177e4SLinus Torvalds /** 7021da177e4SLinus Torvalds * show_scaling_driver - show the cpufreq driver currently loaded 7031da177e4SLinus Torvalds */ 7041da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) 7051da177e4SLinus Torvalds { 7061c3d85ddSRafael J. Wysocki return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); 7071da177e4SLinus Torvalds } 7081da177e4SLinus Torvalds 7091da177e4SLinus Torvalds /** 7101da177e4SLinus Torvalds * show_scaling_available_governors - show the available CPUfreq governors 7111da177e4SLinus Torvalds */ 7121da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, 7131da177e4SLinus Torvalds char *buf) 7141da177e4SLinus Torvalds { 7151da177e4SLinus Torvalds ssize_t i = 0; 7161da177e4SLinus Torvalds struct cpufreq_governor *t; 7171da177e4SLinus Torvalds 7189c0ebcf7SViresh Kumar if (!has_target()) { 7191da177e4SLinus Torvalds i += sprintf(buf, "performance powersave"); 7201da177e4SLinus Torvalds goto out; 7211da177e4SLinus Torvalds } 7221da177e4SLinus Torvalds 723f7b27061SViresh Kumar for_each_governor(t) { 72429464f28SDave Jones if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 72529464f28SDave Jones - (CPUFREQ_NAME_LEN + 2))) 7261da177e4SLinus Torvalds goto out; 7274b972f0bSviresh kumar i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); 7281da177e4SLinus Torvalds } 7291da177e4SLinus Torvalds out: 7301da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7311da177e4SLinus Torvalds return i; 7321da177e4SLinus Torvalds } 733e8628dd0SDarrick J. 
Wong 734f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) 7351da177e4SLinus Torvalds { 7361da177e4SLinus Torvalds ssize_t i = 0; 7371da177e4SLinus Torvalds unsigned int cpu; 7381da177e4SLinus Torvalds 739835481d9SRusty Russell for_each_cpu(cpu, mask) { 7401da177e4SLinus Torvalds if (i) 7411da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 7421da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 7431da177e4SLinus Torvalds if (i >= (PAGE_SIZE - 5)) 7441da177e4SLinus Torvalds break; 7451da177e4SLinus Torvalds } 7461da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7471da177e4SLinus Torvalds return i; 7481da177e4SLinus Torvalds } 749f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus); 7501da177e4SLinus Torvalds 751e8628dd0SDarrick J. Wong /** 752e8628dd0SDarrick J. Wong * show_related_cpus - show the CPUs affected by each transition even if 753e8628dd0SDarrick J. Wong * hw coordination is in use 754e8628dd0SDarrick J. Wong */ 755e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 756e8628dd0SDarrick J. Wong { 757f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->related_cpus, buf); 758e8628dd0SDarrick J. Wong } 759e8628dd0SDarrick J. Wong 760e8628dd0SDarrick J. Wong /** 761e8628dd0SDarrick J. Wong * show_affected_cpus - show the CPUs affected by each transition 762e8628dd0SDarrick J. Wong */ 763e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) 764e8628dd0SDarrick J. Wong { 765f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->cpus, buf); 766e8628dd0SDarrick J. Wong } 767e8628dd0SDarrick J. Wong 7689e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, 7699e76988eSVenki Pallipadi const char *buf, size_t count) 7709e76988eSVenki Pallipadi { 7719e76988eSVenki Pallipadi unsigned int freq = 0; 7729e76988eSVenki Pallipadi unsigned int ret; 7739e76988eSVenki Pallipadi 774879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->store_setspeed) 7759e76988eSVenki Pallipadi return -EINVAL; 7769e76988eSVenki Pallipadi 7779e76988eSVenki Pallipadi ret = sscanf(buf, "%u", &freq); 7789e76988eSVenki Pallipadi if (ret != 1) 7799e76988eSVenki Pallipadi return -EINVAL; 7809e76988eSVenki Pallipadi 7819e76988eSVenki Pallipadi policy->governor->store_setspeed(policy, freq); 7829e76988eSVenki Pallipadi 7839e76988eSVenki Pallipadi return count; 7849e76988eSVenki Pallipadi } 7859e76988eSVenki Pallipadi 7869e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) 7879e76988eSVenki Pallipadi { 788879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->show_setspeed) 7899e76988eSVenki Pallipadi return sprintf(buf, "<unsupported>\n"); 7909e76988eSVenki Pallipadi 7919e76988eSVenki Pallipadi return policy->governor->show_setspeed(policy, buf); 7929e76988eSVenki Pallipadi } 7931da177e4SLinus Torvalds 794e2f74f35SThomas Renninger /** 7958bf1ac72Sviresh kumar * show_bios_limit - show the current cpufreq HW/BIOS limitation 796e2f74f35SThomas Renninger */ 797e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) 798e2f74f35SThomas Renninger { 799e2f74f35SThomas Renninger unsigned int limit; 800e2f74f35SThomas Renninger int ret; 8011c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 8021c3d85ddSRafael J. 
Wysocki ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 803e2f74f35SThomas Renninger if (!ret) 804e2f74f35SThomas Renninger return sprintf(buf, "%u\n", limit); 805e2f74f35SThomas Renninger } 806e2f74f35SThomas Renninger return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 807e2f74f35SThomas Renninger } 808e2f74f35SThomas Renninger 8096dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); 8106dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq); 8116dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq); 8126dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency); 8136dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors); 8146dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver); 8156dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq); 8166dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit); 8176dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus); 8186dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus); 8196dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq); 8206dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq); 8216dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor); 8226dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed); 8231da177e4SLinus Torvalds 8241da177e4SLinus Torvalds static struct attribute *default_attrs[] = { 8251da177e4SLinus Torvalds &cpuinfo_min_freq.attr, 8261da177e4SLinus Torvalds &cpuinfo_max_freq.attr, 827ed129784SThomas Renninger &cpuinfo_transition_latency.attr, 8281da177e4SLinus Torvalds &scaling_min_freq.attr, 8291da177e4SLinus Torvalds &scaling_max_freq.attr, 8301da177e4SLinus Torvalds &affected_cpus.attr, 831e8628dd0SDarrick J. Wong &related_cpus.attr, 8321da177e4SLinus Torvalds &scaling_governor.attr, 8331da177e4SLinus Torvalds &scaling_driver.attr, 8341da177e4SLinus Torvalds &scaling_available_governors.attr, 8359e76988eSVenki Pallipadi &scaling_setspeed.attr, 8361da177e4SLinus Torvalds NULL 8371da177e4SLinus Torvalds }; 8381da177e4SLinus Torvalds 8391da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) 8401da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr) 8411da177e4SLinus Torvalds 8421da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 8431da177e4SLinus Torvalds { 8441da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8451da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 8461b750e3bSViresh Kumar ssize_t ret; 8476eed9404SViresh Kumar 8486eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 8491b750e3bSViresh Kumar return -EINVAL; 8505a01f2e8SVenkatesh Pallipadi 851ad7722daSviresh kumar down_read(&policy->rwsem); 8525a01f2e8SVenkatesh Pallipadi 853e08f5f5bSGautham R Shenoy if (fattr->show) 854e08f5f5bSGautham R Shenoy ret = fattr->show(policy, buf); 855e08f5f5bSGautham R Shenoy else 856e08f5f5bSGautham R Shenoy ret = -EIO; 857e08f5f5bSGautham R Shenoy 858ad7722daSviresh kumar up_read(&policy->rwsem); 8596eed9404SViresh Kumar up_read(&cpufreq_rwsem); 8601b750e3bSViresh Kumar 8611da177e4SLinus Torvalds return ret; 8621da177e4SLinus Torvalds } 8631da177e4SLinus Torvalds 8641da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr, 8651da177e4SLinus Torvalds const char *buf, size_t count) 8661da177e4SLinus Torvalds { 8671da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8681da177e4SLinus 
Torvalds struct freq_attr *fattr = to_attr(attr); 869a07530b4SDave Jones ssize_t ret = -EINVAL; 8706eed9404SViresh Kumar 8714f750c93SSrivatsa S. Bhat get_online_cpus(); 8724f750c93SSrivatsa S. Bhat 8734f750c93SSrivatsa S. Bhat if (!cpu_online(policy->cpu)) 8744f750c93SSrivatsa S. Bhat goto unlock; 8754f750c93SSrivatsa S. Bhat 8766eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 8774f750c93SSrivatsa S. Bhat goto unlock; 8785a01f2e8SVenkatesh Pallipadi 879ad7722daSviresh kumar down_write(&policy->rwsem); 8805a01f2e8SVenkatesh Pallipadi 881e08f5f5bSGautham R Shenoy if (fattr->store) 882e08f5f5bSGautham R Shenoy ret = fattr->store(policy, buf, count); 883e08f5f5bSGautham R Shenoy else 884e08f5f5bSGautham R Shenoy ret = -EIO; 885e08f5f5bSGautham R Shenoy 886ad7722daSviresh kumar up_write(&policy->rwsem); 8876eed9404SViresh Kumar 8886eed9404SViresh Kumar up_read(&cpufreq_rwsem); 8894f750c93SSrivatsa S. Bhat unlock: 8904f750c93SSrivatsa S. Bhat put_online_cpus(); 8914f750c93SSrivatsa S. Bhat 8921da177e4SLinus Torvalds return ret; 8931da177e4SLinus Torvalds } 8941da177e4SLinus Torvalds 8951da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj) 8961da177e4SLinus Torvalds { 8971da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8982d06d8c4SDominik Brodowski pr_debug("last reference is dropped\n"); 8991da177e4SLinus Torvalds complete(&policy->kobj_unregister); 9001da177e4SLinus Torvalds } 9011da177e4SLinus Torvalds 90252cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = { 9031da177e4SLinus Torvalds .show = show, 9041da177e4SLinus Torvalds .store = store, 9051da177e4SLinus Torvalds }; 9061da177e4SLinus Torvalds 9071da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = { 9081da177e4SLinus Torvalds .sysfs_ops = &sysfs_ops, 9091da177e4SLinus Torvalds .default_attrs = default_attrs, 9101da177e4SLinus Torvalds .release = cpufreq_sysfs_release, 9111da177e4SLinus Torvalds }; 9121da177e4SLinus Torvalds 9132361be23SViresh Kumar struct kobject *cpufreq_global_kobject; 9142361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject); 9152361be23SViresh Kumar 9162361be23SViresh Kumar static int cpufreq_global_kobject_usage; 9172361be23SViresh Kumar 9182361be23SViresh Kumar int cpufreq_get_global_kobject(void) 9192361be23SViresh Kumar { 9202361be23SViresh Kumar if (!cpufreq_global_kobject_usage++) 9212361be23SViresh Kumar return kobject_add(cpufreq_global_kobject, 9222361be23SViresh Kumar &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); 9232361be23SViresh Kumar 9242361be23SViresh Kumar return 0; 9252361be23SViresh Kumar } 9262361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject); 9272361be23SViresh Kumar 9282361be23SViresh Kumar void cpufreq_put_global_kobject(void) 9292361be23SViresh Kumar { 9302361be23SViresh Kumar if (!--cpufreq_global_kobject_usage) 9312361be23SViresh Kumar kobject_del(cpufreq_global_kobject); 9322361be23SViresh Kumar } 9332361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject); 9342361be23SViresh Kumar 9352361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr) 9362361be23SViresh Kumar { 9372361be23SViresh Kumar int ret = cpufreq_get_global_kobject(); 9382361be23SViresh Kumar 9392361be23SViresh Kumar if (!ret) { 9402361be23SViresh Kumar ret = sysfs_create_file(cpufreq_global_kobject, attr); 9412361be23SViresh Kumar if (ret) 9422361be23SViresh Kumar cpufreq_put_global_kobject(); 9432361be23SViresh Kumar } 9442361be23SViresh Kumar 9452361be23SViresh Kumar return ret; 
9462361be23SViresh Kumar } 9472361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file); 9482361be23SViresh Kumar 9492361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr) 9502361be23SViresh Kumar { 9512361be23SViresh Kumar sysfs_remove_file(cpufreq_global_kobject, attr); 9522361be23SViresh Kumar cpufreq_put_global_kobject(); 9532361be23SViresh Kumar } 9542361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file); 9552361be23SViresh Kumar 95619d6f7ecSDave Jones /* symlink affected CPUs */ 957308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) 95819d6f7ecSDave Jones { 95919d6f7ecSDave Jones unsigned int j; 96019d6f7ecSDave Jones int ret = 0; 96119d6f7ecSDave Jones 96219d6f7ecSDave Jones for_each_cpu(j, policy->cpus) { 9638a25a2fdSKay Sievers struct device *cpu_dev; 96419d6f7ecSDave Jones 965308b60e7SViresh Kumar if (j == policy->cpu) 96619d6f7ecSDave Jones continue; 96719d6f7ecSDave Jones 968e8fdde10SViresh Kumar pr_debug("Adding link for CPU: %u\n", j); 9698a25a2fdSKay Sievers cpu_dev = get_cpu_device(j); 9708a25a2fdSKay Sievers ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, 97119d6f7ecSDave Jones "cpufreq"); 97271c3461eSRafael J. Wysocki if (ret) 97371c3461eSRafael J. Wysocki break; 97419d6f7ecSDave Jones } 97519d6f7ecSDave Jones return ret; 97619d6f7ecSDave Jones } 97719d6f7ecSDave Jones 978308b60e7SViresh Kumar static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, 9798a25a2fdSKay Sievers struct device *dev) 980909a694eSDave Jones { 981909a694eSDave Jones struct freq_attr **drv_attr; 982909a694eSDave Jones int ret = 0; 983909a694eSDave Jones 984909a694eSDave Jones /* set up files for this cpu device */ 9851c3d85ddSRafael J. Wysocki drv_attr = cpufreq_driver->attr; 986f13f1184SViresh Kumar while (drv_attr && *drv_attr) { 987909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 988909a694eSDave Jones if (ret) 9896d4e81edSTomeu Vizoso return ret; 990909a694eSDave Jones drv_attr++; 991909a694eSDave Jones } 9921c3d85ddSRafael J. Wysocki if (cpufreq_driver->get) { 993909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 994909a694eSDave Jones if (ret) 9956d4e81edSTomeu Vizoso return ret; 996909a694eSDave Jones } 997c034b02eSDirk Brandewie 998909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 999909a694eSDave Jones if (ret) 10006d4e81edSTomeu Vizoso return ret; 1001c034b02eSDirk Brandewie 10021c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 1003e2f74f35SThomas Renninger ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 1004e2f74f35SThomas Renninger if (ret) 10056d4e81edSTomeu Vizoso return ret; 1006e2f74f35SThomas Renninger } 1007909a694eSDave Jones 10086d4e81edSTomeu Vizoso return cpufreq_add_dev_symlink(policy); 1009e18f1682SSrivatsa S. Bhat } 1010e18f1682SSrivatsa S. Bhat 1011e18f1682SSrivatsa S. Bhat static void cpufreq_init_policy(struct cpufreq_policy *policy) 1012e18f1682SSrivatsa S. Bhat { 10136e2c89d1Sviresh kumar struct cpufreq_governor *gov = NULL; 1014e18f1682SSrivatsa S. Bhat struct cpufreq_policy new_policy; 1015e18f1682SSrivatsa S. Bhat int ret = 0; 1016e18f1682SSrivatsa S. 
Bhat 1017d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 1018a27a9ab7SJason Baron 10196e2c89d1Sviresh kumar /* Update governor of new_policy to the governor used before hotplug */ 102042f91fa1SViresh Kumar gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); 10216e2c89d1Sviresh kumar if (gov) 10226e2c89d1Sviresh kumar pr_debug("Restoring governor %s for cpu %d\n", 10236e2c89d1Sviresh kumar policy->governor->name, policy->cpu); 10246e2c89d1Sviresh kumar else 10256e2c89d1Sviresh kumar gov = CPUFREQ_DEFAULT_GOVERNOR; 10266e2c89d1Sviresh kumar 10276e2c89d1Sviresh kumar new_policy.governor = gov; 10286e2c89d1Sviresh kumar 1029a27a9ab7SJason Baron /* Use the default policy if its valid. */ 1030a27a9ab7SJason Baron if (cpufreq_driver->setpolicy) 10316e2c89d1Sviresh kumar cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 1032ecf7e461SDave Jones 1033ecf7e461SDave Jones /* set default policy */ 1034037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 1035ecf7e461SDave Jones if (ret) { 10362d06d8c4SDominik Brodowski pr_debug("setting policy failed\n"); 10371c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 10381c3d85ddSRafael J. Wysocki cpufreq_driver->exit(policy); 1039ecf7e461SDave Jones } 1040909a694eSDave Jones } 1041909a694eSDave Jones 1042d8d3b471SViresh Kumar static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 104342f921a6SViresh Kumar unsigned int cpu, struct device *dev) 1044fcf80582SViresh Kumar { 10459c0ebcf7SViresh Kumar int ret = 0; 1046fcf80582SViresh Kumar 1047bb29ae15SViresh Kumar /* Has this CPU been taken care of already? */ 1048bb29ae15SViresh Kumar if (cpumask_test_cpu(cpu, policy->cpus)) 1049bb29ae15SViresh Kumar return 0; 1050bb29ae15SViresh Kumar 10519c0ebcf7SViresh Kumar if (has_target()) { 10523de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 10533de9bdebSViresh Kumar if (ret) { 10543de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 10553de9bdebSViresh Kumar return ret; 10563de9bdebSViresh Kumar } 10573de9bdebSViresh Kumar } 1058fcf80582SViresh Kumar 1059ad7722daSviresh kumar down_write(&policy->rwsem); 1060fcf80582SViresh Kumar cpumask_set_cpu(cpu, policy->cpus); 1061ad7722daSviresh kumar up_write(&policy->rwsem); 10622eaa3e2dSViresh Kumar 10639c0ebcf7SViresh Kumar if (has_target()) { 1064e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1065e5c87b76SStratos Karafotis if (!ret) 1066e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1067e5c87b76SStratos Karafotis 1068e5c87b76SStratos Karafotis if (ret) { 10693de9bdebSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 10703de9bdebSViresh Kumar return ret; 10713de9bdebSViresh Kumar } 1072820c6ca2SViresh Kumar } 1073fcf80582SViresh Kumar 107442f921a6SViresh Kumar return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 1075fcf80582SViresh Kumar } 10761da177e4SLinus Torvalds 10778414809cSSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) 10788414809cSSrivatsa S. Bhat { 10798414809cSSrivatsa S. Bhat struct cpufreq_policy *policy; 10808414809cSSrivatsa S. Bhat unsigned long flags; 10818414809cSSrivatsa S. Bhat 108244871c9cSLan Tianyu read_lock_irqsave(&cpufreq_driver_lock, flags); 10833914d379SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 108444871c9cSLan Tianyu read_unlock_irqrestore(&cpufreq_driver_lock, flags); 10858414809cSSrivatsa S. 
Bhat 10863914d379SViresh Kumar if (likely(policy)) { 10873914d379SViresh Kumar /* Policy should be inactive here */ 10883914d379SViresh Kumar WARN_ON(!policy_is_inactive(policy)); 10896e2c89d1Sviresh kumar policy->governor = NULL; 10903914d379SViresh Kumar } 10916e2c89d1Sviresh kumar 10928414809cSSrivatsa S. Bhat return policy; 10938414809cSSrivatsa S. Bhat } 10948414809cSSrivatsa S. Bhat 1095e9698cc5SSrivatsa S. Bhat static struct cpufreq_policy *cpufreq_policy_alloc(void) 1096e9698cc5SSrivatsa S. Bhat { 1097e9698cc5SSrivatsa S. Bhat struct cpufreq_policy *policy; 1098e9698cc5SSrivatsa S. Bhat 1099e9698cc5SSrivatsa S. Bhat policy = kzalloc(sizeof(*policy), GFP_KERNEL); 1100e9698cc5SSrivatsa S. Bhat if (!policy) 1101e9698cc5SSrivatsa S. Bhat return NULL; 1102e9698cc5SSrivatsa S. Bhat 1103e9698cc5SSrivatsa S. Bhat if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) 1104e9698cc5SSrivatsa S. Bhat goto err_free_policy; 1105e9698cc5SSrivatsa S. Bhat 1106e9698cc5SSrivatsa S. Bhat if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1107e9698cc5SSrivatsa S. Bhat goto err_free_cpumask; 1108e9698cc5SSrivatsa S. Bhat 1109c88a1f8bSLukasz Majewski INIT_LIST_HEAD(&policy->policy_list); 1110ad7722daSviresh kumar init_rwsem(&policy->rwsem); 111112478cf0SSrivatsa S. Bhat spin_lock_init(&policy->transition_lock); 111212478cf0SSrivatsa S. Bhat init_waitqueue_head(&policy->transition_wait); 1113818c5712SViresh Kumar init_completion(&policy->kobj_unregister); 1114818c5712SViresh Kumar INIT_WORK(&policy->update, handle_update); 1115ad7722daSviresh kumar 1116e9698cc5SSrivatsa S. Bhat return policy; 1117e9698cc5SSrivatsa S. Bhat 1118e9698cc5SSrivatsa S. Bhat err_free_cpumask: 1119e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1120e9698cc5SSrivatsa S. Bhat err_free_policy: 1121e9698cc5SSrivatsa S. Bhat kfree(policy); 1122e9698cc5SSrivatsa S. Bhat 1123e9698cc5SSrivatsa S. Bhat return NULL; 1124e9698cc5SSrivatsa S. Bhat } 1125e9698cc5SSrivatsa S. Bhat 112642f921a6SViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) 112742f921a6SViresh Kumar { 112842f921a6SViresh Kumar struct kobject *kobj; 112942f921a6SViresh Kumar struct completion *cmp; 113042f921a6SViresh Kumar 1131fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1132fcd7af91SViresh Kumar CPUFREQ_REMOVE_POLICY, policy); 1133fcd7af91SViresh Kumar 113442f921a6SViresh Kumar down_read(&policy->rwsem); 113542f921a6SViresh Kumar kobj = &policy->kobj; 113642f921a6SViresh Kumar cmp = &policy->kobj_unregister; 113742f921a6SViresh Kumar up_read(&policy->rwsem); 113842f921a6SViresh Kumar kobject_put(kobj); 113942f921a6SViresh Kumar 114042f921a6SViresh Kumar /* 114142f921a6SViresh Kumar * We need to make sure that the underlying kobj is 114242f921a6SViresh Kumar * actually not referenced anymore by anybody before we 114342f921a6SViresh Kumar * proceed with unloading. 114442f921a6SViresh Kumar */ 114542f921a6SViresh Kumar pr_debug("waiting for dropping of refcount\n"); 114642f921a6SViresh Kumar wait_for_completion(cmp); 114742f921a6SViresh Kumar pr_debug("wait complete\n"); 114842f921a6SViresh Kumar } 114942f921a6SViresh Kumar 1150e9698cc5SSrivatsa S. Bhat static void cpufreq_policy_free(struct cpufreq_policy *policy) 1151e9698cc5SSrivatsa S. 
Bhat { 1152988bed09SViresh Kumar unsigned long flags; 1153988bed09SViresh Kumar int cpu; 1154988bed09SViresh Kumar 1155988bed09SViresh Kumar /* Remove policy from list */ 1156988bed09SViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1157988bed09SViresh Kumar list_del(&policy->policy_list); 1158988bed09SViresh Kumar 1159988bed09SViresh Kumar for_each_cpu(cpu, policy->related_cpus) 1160988bed09SViresh Kumar per_cpu(cpufreq_cpu_data, cpu) = NULL; 1161988bed09SViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1162988bed09SViresh Kumar 1163e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->related_cpus); 1164e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1165e9698cc5SSrivatsa S. Bhat kfree(policy); 1166e9698cc5SSrivatsa S. Bhat } 1167e9698cc5SSrivatsa S. Bhat 11681bfb425bSViresh Kumar static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu, 11691bfb425bSViresh Kumar struct device *cpu_dev) 11700d66b91eSSrivatsa S. Bhat { 11711bfb425bSViresh Kumar int ret; 11721bfb425bSViresh Kumar 117399ec899eSSrivatsa S. Bhat if (WARN_ON(cpu == policy->cpu)) 11741bfb425bSViresh Kumar return 0; 11751bfb425bSViresh Kumar 11761bfb425bSViresh Kumar /* Move kobject to the new policy->cpu */ 11771bfb425bSViresh Kumar ret = kobject_move(&policy->kobj, &cpu_dev->kobj); 11781bfb425bSViresh Kumar if (ret) { 11791bfb425bSViresh Kumar pr_err("%s: Failed to move kobj: %d\n", __func__, ret); 11801bfb425bSViresh Kumar return ret; 11811bfb425bSViresh Kumar } 1182cb38ed5cSSrivatsa S. Bhat 1183ad7722daSviresh kumar down_write(&policy->rwsem); 11840d66b91eSSrivatsa S. Bhat policy->cpu = cpu; 1185ad7722daSviresh kumar up_write(&policy->rwsem); 11868efd5765SViresh Kumar 11871bfb425bSViresh Kumar return 0; 11880d66b91eSSrivatsa S. Bhat } 11890d66b91eSSrivatsa S. Bhat 119023faf0b7SViresh Kumar /** 119123faf0b7SViresh Kumar * cpufreq_add_dev - add a CPU device 119223faf0b7SViresh Kumar * 119323faf0b7SViresh Kumar * Adds the cpufreq interface for a CPU device. 119423faf0b7SViresh Kumar * 119523faf0b7SViresh Kumar * The Oracle says: try running cpufreq registration/unregistration concurrently 119623faf0b7SViresh Kumar * with cpu hotplugging and all hell will break loose. Tried to clean this 119723faf0b7SViresh Kumar * mess up, but more thorough testing is needed.
- Mathieu 119823faf0b7SViresh Kumar */ 119923faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 12001da177e4SLinus Torvalds { 1201fcf80582SViresh Kumar unsigned int j, cpu = dev->id; 120265922465SViresh Kumar int ret = -ENOMEM; 12037f0c020aSViresh Kumar struct cpufreq_policy *policy; 12041da177e4SLinus Torvalds unsigned long flags; 120596bbbe4aSViresh Kumar bool recover_policy = cpufreq_suspended; 12061da177e4SLinus Torvalds 1207c32b6b8eSAshok Raj if (cpu_is_offline(cpu)) 1208c32b6b8eSAshok Raj return 0; 1209c32b6b8eSAshok Raj 12102d06d8c4SDominik Brodowski pr_debug("adding CPU %u\n", cpu); 12111da177e4SLinus Torvalds 12126eed9404SViresh Kumar if (!down_read_trylock(&cpufreq_rwsem)) 12136eed9404SViresh Kumar return 0; 12146eed9404SViresh Kumar 1215bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */ 1216*9104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 1217*9104bb26SViresh Kumar if (policy && !policy_is_inactive(policy)) { 1218*9104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 12197f0c020aSViresh Kumar ret = cpufreq_add_policy_cpu(policy, cpu, dev); 12206eed9404SViresh Kumar up_read(&cpufreq_rwsem); 12216eed9404SViresh Kumar return ret; 1222fcf80582SViresh Kumar } 12231da177e4SLinus Torvalds 122472368d12SRafael J. Wysocki /* 122572368d12SRafael J. Wysocki * Restore the saved policy when doing light-weight init and fall back 122672368d12SRafael J. Wysocki * to the full init if that fails. 122772368d12SRafael J. Wysocki */ 122896bbbe4aSViresh Kumar policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; 122972368d12SRafael J. Wysocki if (!policy) { 123096bbbe4aSViresh Kumar recover_policy = false; 1231e9698cc5SSrivatsa S. Bhat policy = cpufreq_policy_alloc(); 1232059019a3SDave Jones if (!policy) 12331da177e4SLinus Torvalds goto nomem_out; 123472368d12SRafael J. Wysocki } 12350d66b91eSSrivatsa S. Bhat 12360d66b91eSSrivatsa S. Bhat /* 12370d66b91eSSrivatsa S. Bhat * In the resume path, since we restore a saved policy, the assignment 12380d66b91eSSrivatsa S. Bhat * to policy->cpu is like an update of the existing policy, rather than 12390d66b91eSSrivatsa S. Bhat * the creation of a brand new one. So we need to perform this update 12400d66b91eSSrivatsa S. Bhat * by invoking update_policy_cpu(). 12410d66b91eSSrivatsa S. Bhat */ 12421bfb425bSViresh Kumar if (recover_policy && cpu != policy->cpu) 12431bfb425bSViresh Kumar WARN_ON(update_policy_cpu(policy, cpu, dev)); 12441bfb425bSViresh Kumar else 12451da177e4SLinus Torvalds policy->cpu = cpu; 12460d66b91eSSrivatsa S. Bhat 1247835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 12481da177e4SLinus Torvalds 12491da177e4SLinus Torvalds /* call driver. From then on the cpufreq must be able 12501da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 12511da177e4SLinus Torvalds */ 12521c3d85ddSRafael J. 
Wysocki ret = cpufreq_driver->init(policy); 12531da177e4SLinus Torvalds if (ret) { 12542d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 12552eaa3e2dSViresh Kumar goto err_set_policy_cpu; 12561da177e4SLinus Torvalds } 1257643ae6e8SViresh Kumar 12586d4e81edSTomeu Vizoso down_write(&policy->rwsem); 12596d4e81edSTomeu Vizoso 12605a7e56a5SViresh Kumar /* related cpus should at least have policy->cpus */ 12615a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 12625a7e56a5SViresh Kumar 12635a7e56a5SViresh Kumar /* 12645a7e56a5SViresh Kumar * affected cpus must always be the ones which are online. We aren't 12655a7e56a5SViresh Kumar * managing offline cpus here. 12665a7e56a5SViresh Kumar */ 12675a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 12685a7e56a5SViresh Kumar 126996bbbe4aSViresh Kumar if (!recover_policy) { 12705a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 12715a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 12726d4e81edSTomeu Vizoso 12736d4e81edSTomeu Vizoso /* prepare interface data */ 12746d4e81edSTomeu Vizoso ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, 12756d4e81edSTomeu Vizoso &dev->kobj, "cpufreq"); 12766d4e81edSTomeu Vizoso if (ret) { 12776d4e81edSTomeu Vizoso pr_err("%s: failed to init policy->kobj: %d\n", 12786d4e81edSTomeu Vizoso __func__, ret); 12796d4e81edSTomeu Vizoso goto err_init_policy_kobj; 12806d4e81edSTomeu Vizoso } 12815a7e56a5SViresh Kumar 1282652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1283988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus) 1284652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1285652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1286988bed09SViresh Kumar } 1287652ed95dSViresh Kumar 12882ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1289da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1290da60ce9fSViresh Kumar if (!policy->cur) { 1291da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 1292da60ce9fSViresh Kumar goto err_get_freq; 1293da60ce9fSViresh Kumar } 1294da60ce9fSViresh Kumar } 1295da60ce9fSViresh Kumar 1296d3916691SViresh Kumar /* 1297d3916691SViresh Kumar * Sometimes boot loaders set the CPU frequency to a value outside of 1298d3916691SViresh Kumar * the frequency table present with the cpufreq core. In such cases the CPU might be 1299d3916691SViresh Kumar * unstable if it has to run at that frequency for a long duration of time, 1300d3916691SViresh Kumar * and so it's better to set it to a frequency which is specified in 1301d3916691SViresh Kumar * the freq-table. This also makes cpufreq stats inconsistent as 1302d3916691SViresh Kumar * cpufreq-stats would fail to register because the current frequency of the CPU 1303d3916691SViresh Kumar * isn't found in the freq-table. 1304d3916691SViresh Kumar * 1305d3916691SViresh Kumar * Because we don't want this change to affect the boot process badly, we go 1306d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1307d3916691SViresh Kumar * otherwise we will end up setting the freq to the lowest entry of the table, as 'cur' 1308d3916691SViresh Kumar * is initialized to zero). 1309d3916691SViresh Kumar * 1310d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1", otherwise 1311d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be 1312d3916691SViresh Kumar * equal to target-freq.
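 *
 * For instance (hypothetical numbers, not taken from any real platform):
 * with a freq-table of 800000, 1000000 and 1200000 kHz and a boot loader
 * that left the CPU at 918000 kHz, the check below requests 917999 kHz
 * with CPUFREQ_RELATION_L, which resolves to the next listed frequency,
 * 1000000 kHz.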
1313d3916691SViresh Kumar */ 1314d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1315d3916691SViresh Kumar && has_target()) { 1316d3916691SViresh Kumar /* Are we running at unknown frequency ? */ 1317d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1318d3916691SViresh Kumar if (ret == -EINVAL) { 1319d3916691SViresh Kumar /* Warn user and fix it */ 1320d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", 1321d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1322d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1323d3916691SViresh Kumar CPUFREQ_RELATION_L); 1324d3916691SViresh Kumar 1325d3916691SViresh Kumar /* 1326d3916691SViresh Kumar * Reaching here after boot in a few seconds may not 1327d3916691SViresh Kumar * mean that system will remain stable at "unknown" 1328d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON(). 1329d3916691SViresh Kumar */ 1330d3916691SViresh Kumar BUG_ON(ret); 1331d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", 1332d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1333d3916691SViresh Kumar } 1334d3916691SViresh Kumar } 1335d3916691SViresh Kumar 1336a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1337a1531acdSThomas Renninger CPUFREQ_START, policy); 1338a1531acdSThomas Renninger 133996bbbe4aSViresh Kumar if (!recover_policy) { 1340308b60e7SViresh Kumar ret = cpufreq_add_dev_interface(policy, dev); 134119d6f7ecSDave Jones if (ret) 13420142f9dcSAhmed S. Darwish goto err_out_unregister; 1343fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1344fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 1345c88a1f8bSLukasz Majewski 1346c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1347c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1348c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1349988bed09SViresh Kumar } 13508ff69732SDave Jones 1351e18f1682SSrivatsa S. Bhat cpufreq_init_policy(policy); 1352e18f1682SSrivatsa S. 
Bhat 135396bbbe4aSViresh Kumar if (!recover_policy) { 135408fd8c1cSViresh Kumar policy->user_policy.policy = policy->policy; 135508fd8c1cSViresh Kumar policy->user_policy.governor = policy->governor; 135608fd8c1cSViresh Kumar } 13574e97b631SViresh Kumar up_write(&policy->rwsem); 135808fd8c1cSViresh Kumar 1359038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 13607c45cf31SViresh Kumar 13616eed9404SViresh Kumar up_read(&cpufreq_rwsem); 13626eed9404SViresh Kumar 13637c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 13647c45cf31SViresh Kumar if (cpufreq_driver->ready) 13657c45cf31SViresh Kumar cpufreq_driver->ready(policy); 13667c45cf31SViresh Kumar 13672d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 13681da177e4SLinus Torvalds 13691da177e4SLinus Torvalds return 0; 13701da177e4SLinus Torvalds 13711da177e4SLinus Torvalds err_out_unregister: 1372652ed95dSViresh Kumar err_get_freq: 13736d4e81edSTomeu Vizoso if (!recover_policy) { 13746d4e81edSTomeu Vizoso kobject_put(&policy->kobj); 13756d4e81edSTomeu Vizoso wait_for_completion(&policy->kobj_unregister); 13766d4e81edSTomeu Vizoso } 13776d4e81edSTomeu Vizoso err_init_policy_kobj: 13787106e02bSPrarit Bhargava up_write(&policy->rwsem); 13797106e02bSPrarit Bhargava 1380da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1381da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 13822eaa3e2dSViresh Kumar err_set_policy_cpu: 13833914d379SViresh Kumar if (recover_policy) 138442f921a6SViresh Kumar cpufreq_policy_put_kobj(policy); 1385e9698cc5SSrivatsa S. Bhat cpufreq_policy_free(policy); 138642f921a6SViresh Kumar 13871da177e4SLinus Torvalds nomem_out: 13886eed9404SViresh Kumar up_read(&cpufreq_rwsem); 13896eed9404SViresh Kumar 13901da177e4SLinus Torvalds return ret; 13911da177e4SLinus Torvalds } 13921da177e4SLinus Torvalds 1393cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_prepare(struct device *dev, 139496bbbe4aSViresh Kumar struct subsys_interface *sif) 13951da177e4SLinus Torvalds { 1396f9ba680dSSrivatsa S. Bhat unsigned int cpu = dev->id, cpus; 13971bfb425bSViresh Kumar int ret; 13983a3e9e06SViresh Kumar struct cpufreq_policy *policy; 13991da177e4SLinus Torvalds 1400b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 14011da177e4SLinus Torvalds 1402988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 14033a3e9e06SViresh Kumar if (!policy) { 1404b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 14051da177e4SLinus Torvalds return -EINVAL; 14061da177e4SLinus Torvalds } 14071da177e4SLinus Torvalds 14089c0ebcf7SViresh Kumar if (has_target()) { 14093de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 14103de9bdebSViresh Kumar if (ret) { 14113de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 14123de9bdebSViresh Kumar return ret; 14133de9bdebSViresh Kumar } 14145a01f2e8SVenkatesh Pallipadi 1415fa69e33fSDirk Brandewie strncpy(per_cpu(cpufreq_cpu_governor, cpu), 14163a3e9e06SViresh Kumar policy->governor->name, CPUFREQ_NAME_LEN); 1417db5f2995SViresh Kumar } 14181da177e4SLinus Torvalds 1419ad7722daSviresh kumar down_read(&policy->rwsem); 14203a3e9e06SViresh Kumar cpus = cpumask_weight(policy->cpus); 1421ad7722daSviresh kumar up_read(&policy->rwsem); 14221da177e4SLinus Torvalds 142361173f25SSrivatsa S. 
Bhat if (cpu != policy->cpu) { 142473bf0fc2SViresh Kumar sysfs_remove_link(&dev->kobj, "cpufreq"); 142573bf0fc2SViresh Kumar } else if (cpus > 1) { 14261bfb425bSViresh Kumar /* Nominate new CPU */ 14271bfb425bSViresh Kumar int new_cpu = cpumask_any_but(policy->cpus, cpu); 14281bfb425bSViresh Kumar struct device *cpu_dev = get_cpu_device(new_cpu); 14291bfb425bSViresh Kumar 14301bfb425bSViresh Kumar sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 14311bfb425bSViresh Kumar ret = update_policy_cpu(policy, new_cpu, cpu_dev); 14321bfb425bSViresh Kumar if (ret) { 14331bfb425bSViresh Kumar if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj, 14341bfb425bSViresh Kumar "cpufreq")) 14351bfb425bSViresh Kumar pr_err("%s: Failed to restore kobj link to cpu:%d\n", 14361bfb425bSViresh Kumar __func__, cpu_dev->id); 14371bfb425bSViresh Kumar return ret; 14381bfb425bSViresh Kumar } 1439a82fab29SSrivatsa S. Bhat 1440bda9f552SStratos Karafotis if (!cpufreq_suspended) 144175949c9aSViresh Kumar pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", 144275949c9aSViresh Kumar __func__, new_cpu, cpu); 1443789ca243SPreeti U Murthy } else if (cpufreq_driver->stop_cpu) { 1444367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 14451da177e4SLinus Torvalds } 1446b8eed8afSViresh Kumar 1447cedb70afSSrivatsa S. Bhat return 0; 1448cedb70afSSrivatsa S. Bhat } 1449cedb70afSSrivatsa S. Bhat 1450cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev, 145196bbbe4aSViresh Kumar struct subsys_interface *sif) 1452cedb70afSSrivatsa S. Bhat { 1453988bed09SViresh Kumar unsigned int cpu = dev->id; 1454cedb70afSSrivatsa S. Bhat int ret; 1455988bed09SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); 1456cedb70afSSrivatsa S. Bhat 1457cedb70afSSrivatsa S. Bhat if (!policy) { 1458cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 1459cedb70afSSrivatsa S. Bhat return -EINVAL; 1460cedb70afSSrivatsa S. Bhat } 1461cedb70afSSrivatsa S. Bhat 1462ad7722daSviresh kumar down_write(&policy->rwsem); 14639c8f1ee4SViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 1464ad7722daSviresh kumar up_write(&policy->rwsem); 1465cedb70afSSrivatsa S. Bhat 1466b8eed8afSViresh Kumar /* If cpu is last user of policy, free policy */ 1467988bed09SViresh Kumar if (policy_is_inactive(policy)) { 14689c0ebcf7SViresh Kumar if (has_target()) { 14693de9bdebSViresh Kumar ret = __cpufreq_governor(policy, 14703de9bdebSViresh Kumar CPUFREQ_GOV_POLICY_EXIT); 14713de9bdebSViresh Kumar if (ret) { 14723de9bdebSViresh Kumar pr_err("%s: Failed to exit governor\n", 14733de9bdebSViresh Kumar __func__); 14743de9bdebSViresh Kumar return ret; 14753de9bdebSViresh Kumar } 14763de9bdebSViresh Kumar } 14772a998599SRafael J. Wysocki 147896bbbe4aSViresh Kumar if (!cpufreq_suspended) 147942f921a6SViresh Kumar cpufreq_policy_put_kobj(policy); 14801da177e4SLinus Torvalds 14818414809cSSrivatsa S. Bhat /* 14828414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 14838414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 14848414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 14858414809cSSrivatsa S. Bhat */ 14861c3d85ddSRafael J. 
Wysocki if (cpufreq_driver->exit) 14873a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 148827ecddc2SJacob Shin 148996bbbe4aSViresh Kumar if (!cpufreq_suspended) 14903a3e9e06SViresh Kumar cpufreq_policy_free(policy); 1491e5c87b76SStratos Karafotis } else if (has_target()) { 1492e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1493e5c87b76SStratos Karafotis if (!ret) 1494e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1495e5c87b76SStratos Karafotis 1496e5c87b76SStratos Karafotis if (ret) { 1497e5c87b76SStratos Karafotis pr_err("%s: Failed to start governor\n", __func__); 14983de9bdebSViresh Kumar return ret; 14993de9bdebSViresh Kumar } 1500b8eed8afSViresh Kumar } 15011da177e4SLinus Torvalds 15021da177e4SLinus Torvalds return 0; 15031da177e4SLinus Torvalds } 15041da177e4SLinus Torvalds 1505cedb70afSSrivatsa S. Bhat /** 150627a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1507cedb70afSSrivatsa S. Bhat * 1508cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1509cedb70afSSrivatsa S. Bhat */ 15108a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 15115a01f2e8SVenkatesh Pallipadi { 15128a25a2fdSKay Sievers unsigned int cpu = dev->id; 151327a862e9SViresh Kumar int ret; 1514ec28297aSVenki Pallipadi 1515ec28297aSVenki Pallipadi if (cpu_is_offline(cpu)) 1516ec28297aSVenki Pallipadi return 0; 1517ec28297aSVenki Pallipadi 151896bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_prepare(dev, sif); 151927a862e9SViresh Kumar 152027a862e9SViresh Kumar if (!ret) 152196bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_finish(dev, sif); 152227a862e9SViresh Kumar 152327a862e9SViresh Kumar return ret; 15245a01f2e8SVenkatesh Pallipadi } 15255a01f2e8SVenkatesh Pallipadi 152665f27f38SDavid Howells static void handle_update(struct work_struct *work) 15271da177e4SLinus Torvalds { 152865f27f38SDavid Howells struct cpufreq_policy *policy = 152965f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 153065f27f38SDavid Howells unsigned int cpu = policy->cpu; 15312d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 15321da177e4SLinus Torvalds cpufreq_update_policy(cpu); 15331da177e4SLinus Torvalds } 15341da177e4SLinus Torvalds 15351da177e4SLinus Torvalds /** 1536bb176f7dSViresh Kumar * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're 1537bb176f7dSViresh Kumar * in deep trouble. 1538a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15391da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15401da177e4SLinus Torvalds * 154129464f28SDave Jones * We adjust to the current frequency first, and need to clean up later. 154229464f28SDave Jones * So we either call cpufreq_update_policy() or schedule handle_update().
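 * A typical trigger is platform firmware changing the frequency behind
 * cpufreq's back (see also the note about the BIOS in
 * cpufreq_update_policy()).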
15431da177e4SLinus Torvalds */ 1544a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1545e08f5f5bSGautham R Shenoy unsigned int new_freq) 15461da177e4SLinus Torvalds { 15471da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1548b43a7ffbSViresh Kumar 1549e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1550a1e1dc41SViresh Kumar policy->cur, new_freq); 15511da177e4SLinus Torvalds 1552a1e1dc41SViresh Kumar freqs.old = policy->cur; 15531da177e4SLinus Torvalds freqs.new = new_freq; 1554b43a7ffbSViresh Kumar 15558fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 15568fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 15571da177e4SLinus Torvalds } 15581da177e4SLinus Torvalds 15591da177e4SLinus Torvalds /** 15604ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 156195235ca2SVenkatesh Pallipadi * @cpu: CPU number 156295235ca2SVenkatesh Pallipadi * 156395235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 156495235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 156595235ca2SVenkatesh Pallipadi */ 156695235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 156795235ca2SVenkatesh Pallipadi { 15689e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1569e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 157095235ca2SVenkatesh Pallipadi 15711c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 15721c3d85ddSRafael J. Wysocki return cpufreq_driver->get(cpu); 15739e21ba8bSDirk Brandewie 15749e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 157595235ca2SVenkatesh Pallipadi if (policy) { 1576e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 157795235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 157895235ca2SVenkatesh Pallipadi } 157995235ca2SVenkatesh Pallipadi 15804d34a67dSDave Jones return ret_freq; 158195235ca2SVenkatesh Pallipadi } 158295235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 158395235ca2SVenkatesh Pallipadi 15843d737108SJesse Barnes /** 15853d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 15863d737108SJesse Barnes * @cpu: CPU number 15873d737108SJesse Barnes * 15883d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 15893d737108SJesse Barnes */ 15903d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 15913d737108SJesse Barnes { 15923d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15933d737108SJesse Barnes unsigned int ret_freq = 0; 15943d737108SJesse Barnes 15953d737108SJesse Barnes if (policy) { 15963d737108SJesse Barnes ret_freq = policy->max; 15973d737108SJesse Barnes cpufreq_cpu_put(policy); 15983d737108SJesse Barnes } 15993d737108SJesse Barnes 16003d737108SJesse Barnes return ret_freq; 16013d737108SJesse Barnes } 16023d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 16033d737108SJesse Barnes 1604d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 16051da177e4SLinus Torvalds { 1606e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 16071da177e4SLinus Torvalds 16081c3d85ddSRafael J. 
Wysocki if (!cpufreq_driver->get) 16094d34a67dSDave Jones return ret_freq; 16101da177e4SLinus Torvalds 1611d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 16121da177e4SLinus Torvalds 1613e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 16141c3d85ddSRafael J. Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1615e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1616e08f5f5bSGautham R Shenoy saved value exists */ 1617e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1618a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 16191da177e4SLinus Torvalds schedule_work(&policy->update); 16201da177e4SLinus Torvalds } 16211da177e4SLinus Torvalds } 16221da177e4SLinus Torvalds 16234d34a67dSDave Jones return ret_freq; 16245a01f2e8SVenkatesh Pallipadi } 16251da177e4SLinus Torvalds 16265a01f2e8SVenkatesh Pallipadi /** 16275a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 16285a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 16295a01f2e8SVenkatesh Pallipadi * 16305a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16315a01f2e8SVenkatesh Pallipadi */ 16325a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16335a01f2e8SVenkatesh Pallipadi { 1634999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16355a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16365a01f2e8SVenkatesh Pallipadi 1637999976e0SAaron Plattner if (policy) { 1638ad7722daSviresh kumar down_read(&policy->rwsem); 1639d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1640ad7722daSviresh kumar up_read(&policy->rwsem); 1641999976e0SAaron Plattner 1642999976e0SAaron Plattner cpufreq_cpu_put(policy); 1643999976e0SAaron Plattner } 16446eed9404SViresh Kumar 16454d34a67dSDave Jones return ret_freq; 16461da177e4SLinus Torvalds } 16471da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 16481da177e4SLinus Torvalds 16498a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 16508a25a2fdSKay Sievers .name = "cpufreq", 16518a25a2fdSKay Sievers .subsys = &cpu_subsys, 16528a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 16538a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1654e00e56dfSRafael J. Wysocki }; 1655e00e56dfSRafael J. Wysocki 1656e28867eaSViresh Kumar /* 1657e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1658e28867eaSViresh Kumar * during suspend.. 165942d4dc3fSBenjamin Herrenschmidt */ 1660e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 166142d4dc3fSBenjamin Herrenschmidt { 1662e28867eaSViresh Kumar int ret; 16634bc5d341SDave Jones 1664e28867eaSViresh Kumar if (!policy->suspend_freq) { 1665e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1666e28867eaSViresh Kumar return -EINVAL; 166742d4dc3fSBenjamin Herrenschmidt } 166842d4dc3fSBenjamin Herrenschmidt 1669e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1670e28867eaSViresh Kumar policy->suspend_freq); 1671e28867eaSViresh Kumar 1672e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1673e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1674e28867eaSViresh Kumar if (ret) 1675e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. 
err: %d\n", 1676e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1677e28867eaSViresh Kumar 1678c9060494SDave Jones return ret; 167942d4dc3fSBenjamin Herrenschmidt } 1680e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 168142d4dc3fSBenjamin Herrenschmidt 168242d4dc3fSBenjamin Herrenschmidt /** 16832f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 16841da177e4SLinus Torvalds * 16852f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 16862f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 16872f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 16882f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 16891da177e4SLinus Torvalds */ 16902f0aea93SViresh Kumar void cpufreq_suspend(void) 16911da177e4SLinus Torvalds { 16923a3e9e06SViresh Kumar struct cpufreq_policy *policy; 16931da177e4SLinus Torvalds 16942f0aea93SViresh Kumar if (!cpufreq_driver) 1695e00e56dfSRafael J. Wysocki return; 16961da177e4SLinus Torvalds 16972f0aea93SViresh Kumar if (!has_target()) 1698b1b12babSViresh Kumar goto suspend; 16991da177e4SLinus Torvalds 17002f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 17012f0aea93SViresh Kumar 1702f963735aSViresh Kumar for_each_active_policy(policy) { 17032f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 17042f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 17052f0aea93SViresh Kumar __func__, policy); 17062f0aea93SViresh Kumar else if (cpufreq_driver->suspend 17072f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 17082f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 17092f0aea93SViresh Kumar policy); 17101da177e4SLinus Torvalds } 1711b1b12babSViresh Kumar 1712b1b12babSViresh Kumar suspend: 1713b1b12babSViresh Kumar cpufreq_suspended = true; 17141da177e4SLinus Torvalds } 17151da177e4SLinus Torvalds 17161da177e4SLinus Torvalds /** 17172f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 17181da177e4SLinus Torvalds * 17192f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 17202f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 
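 *
 * Related: for platforms that must sit at one fixed frequency across the
 * suspend cycle, cpufreq_generic_suspend() above can be wired up as the
 * driver's ->suspend() callback. An illustrative sketch only (the foo_*
 * names are made up and 800000 kHz is just an example value):
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		policy->suspend_freq = 800000;	(in kHz, platform specific)
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		...
 *		.init		= foo_cpufreq_init,
 *		.suspend	= cpufreq_generic_suspend,
 *	};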
17211da177e4SLinus Torvalds */ 17222f0aea93SViresh Kumar void cpufreq_resume(void) 17231da177e4SLinus Torvalds { 17241da177e4SLinus Torvalds struct cpufreq_policy *policy; 17251da177e4SLinus Torvalds 17262f0aea93SViresh Kumar if (!cpufreq_driver) 17271da177e4SLinus Torvalds return; 17281da177e4SLinus Torvalds 17298e30444eSLan Tianyu cpufreq_suspended = false; 17308e30444eSLan Tianyu 17312f0aea93SViresh Kumar if (!has_target()) 17322f0aea93SViresh Kumar return; 17331da177e4SLinus Torvalds 17342f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17352f0aea93SViresh Kumar 1736f963735aSViresh Kumar for_each_active_policy(policy) { 17370c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17380c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 17390c5aa405SViresh Kumar policy); 17400c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 17412f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 17422f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 17432f0aea93SViresh Kumar __func__, policy); 1744c75de0acSViresh Kumar } 17452f0aea93SViresh Kumar 17462f0aea93SViresh Kumar /* 1747c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1748c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1749c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 17502f0aea93SViresh Kumar */ 1751c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1752c75de0acSViresh Kumar if (WARN_ON(!policy)) 1753c75de0acSViresh Kumar return; 1754c75de0acSViresh Kumar 17553a3e9e06SViresh Kumar schedule_work(&policy->update); 17561da177e4SLinus Torvalds } 17571da177e4SLinus Torvalds 17589d95046eSBorislav Petkov /** 17599d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 17609d95046eSBorislav Petkov * 17619d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 17629d95046eSBorislav Petkov * or NULL, if none. 17639d95046eSBorislav Petkov */ 17649d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 17659d95046eSBorislav Petkov { 17661c3d85ddSRafael J. Wysocki if (cpufreq_driver) 17671c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 17681c3d85ddSRafael J. Wysocki 17691c3d85ddSRafael J. Wysocki return NULL; 17709d95046eSBorislav Petkov } 17719d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 17721da177e4SLinus Torvalds 177351315cdfSThomas Petazzoni /** 177451315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 177551315cdfSThomas Petazzoni * 177651315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 177751315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 
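 *
 * A hypothetical caller (struct foo_platform_data and foo_configure() are
 * made up for illustration) would simply do:
 *
 *	struct foo_platform_data *data = cpufreq_get_driver_data();
 *
 *	if (data)
 *		foo_configure(data);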
177851315cdfSThomas Petazzoni */ 177951315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 178051315cdfSThomas Petazzoni { 178151315cdfSThomas Petazzoni if (cpufreq_driver) 178251315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 178351315cdfSThomas Petazzoni 178451315cdfSThomas Petazzoni return NULL; 178551315cdfSThomas Petazzoni } 178651315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 178751315cdfSThomas Petazzoni 17881da177e4SLinus Torvalds /********************************************************************* 17891da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 17901da177e4SLinus Torvalds *********************************************************************/ 17911da177e4SLinus Torvalds 17921da177e4SLinus Torvalds /** 17931da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 17941da177e4SLinus Torvalds * @nb: notifier function to register 17951da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 17961da177e4SLinus Torvalds * 17971da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 17981da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 17991da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 18001da177e4SLinus Torvalds * changes in cpufreq policy. 18011da177e4SLinus Torvalds * 18021da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1803e041c683SAlan Stern * blocking_notifier_chain_register. 18041da177e4SLinus Torvalds */ 18051da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 18061da177e4SLinus Torvalds { 18071da177e4SLinus Torvalds int ret; 18081da177e4SLinus Torvalds 1809d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1810d5aaffa9SDirk Brandewie return -EINVAL; 1811d5aaffa9SDirk Brandewie 181274212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 181374212ca4SCesar Eduardo Barros 18141da177e4SLinus Torvalds switch (list) { 18151da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1816b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1817e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18181da177e4SLinus Torvalds break; 18191da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1820e041c683SAlan Stern ret = blocking_notifier_chain_register( 1821e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18221da177e4SLinus Torvalds break; 18231da177e4SLinus Torvalds default: 18241da177e4SLinus Torvalds ret = -EINVAL; 18251da177e4SLinus Torvalds } 18261da177e4SLinus Torvalds 18271da177e4SLinus Torvalds return ret; 18281da177e4SLinus Torvalds } 18291da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18301da177e4SLinus Torvalds 18311da177e4SLinus Torvalds /** 18321da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18331da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18341da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18351da177e4SLinus Torvalds * 18361da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18371da177e4SLinus Torvalds * 18381da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1839e041c683SAlan Stern * blocking_notifier_chain_unregister. 
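 *
 * A minimal, hypothetical user of the register/unregister pair (the foo_*
 * names are made up for illustration):
 *
 *	static int foo_transition(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_info("CPU%u now runs at %u kHz\n",
 *				freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_transition,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	...
 *	cpufreq_unregister_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);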
18401da177e4SLinus Torvalds */ 18411da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 18421da177e4SLinus Torvalds { 18431da177e4SLinus Torvalds int ret; 18441da177e4SLinus Torvalds 1845d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1846d5aaffa9SDirk Brandewie return -EINVAL; 1847d5aaffa9SDirk Brandewie 18481da177e4SLinus Torvalds switch (list) { 18491da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1850b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1851e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18521da177e4SLinus Torvalds break; 18531da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1854e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1855e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18561da177e4SLinus Torvalds break; 18571da177e4SLinus Torvalds default: 18581da177e4SLinus Torvalds ret = -EINVAL; 18591da177e4SLinus Torvalds } 18601da177e4SLinus Torvalds 18611da177e4SLinus Torvalds return ret; 18621da177e4SLinus Torvalds } 18631da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 18641da177e4SLinus Torvalds 18651da177e4SLinus Torvalds 18661da177e4SLinus Torvalds /********************************************************************* 18671da177e4SLinus Torvalds * GOVERNORS * 18681da177e4SLinus Torvalds *********************************************************************/ 18691da177e4SLinus Torvalds 18701c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 18711c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 18721c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 18731c03a2d0SViresh Kumar { 18741c03a2d0SViresh Kumar int ret; 18751c03a2d0SViresh Kumar 18761c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 18771c03a2d0SViresh Kumar 18781c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 18791c03a2d0SViresh Kumar if (!freqs->new) 18801c03a2d0SViresh Kumar return 0; 18811c03a2d0SViresh Kumar 18821c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 18831c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 18841c03a2d0SViresh Kumar 18851c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 18861c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 18871c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 18881c03a2d0SViresh Kumar 18891c03a2d0SViresh Kumar if (ret) 18901c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 18911c03a2d0SViresh Kumar __func__, ret); 18921c03a2d0SViresh Kumar 18931c03a2d0SViresh Kumar return ret; 18941c03a2d0SViresh Kumar } 18951c03a2d0SViresh Kumar 18968d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 18978d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 18988d65775dSViresh Kumar { 18991c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 19001c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 19018d65775dSViresh Kumar int retval = -EINVAL; 19028d65775dSViresh Kumar bool notify; 19038d65775dSViresh Kumar 19048d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 19058d65775dSViresh Kumar if (notify) { 19061c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 19071c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 
19081c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 19091c03a2d0SViresh Kumar if (retval) 19101c03a2d0SViresh Kumar return retval; 19118d65775dSViresh Kumar 19121c03a2d0SViresh Kumar intermediate_freq = freqs.new; 19131c03a2d0SViresh Kumar /* Set old freq to intermediate */ 19141c03a2d0SViresh Kumar if (intermediate_freq) 19151c03a2d0SViresh Kumar freqs.old = freqs.new; 19161c03a2d0SViresh Kumar } 19171c03a2d0SViresh Kumar 19181c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 19198d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 19208d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 19218d65775dSViresh Kumar 19228d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19238d65775dSViresh Kumar } 19248d65775dSViresh Kumar 19258d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 19268d65775dSViresh Kumar if (retval) 19278d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 19288d65775dSViresh Kumar retval); 19298d65775dSViresh Kumar 19301c03a2d0SViresh Kumar if (notify) { 19318d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 19328d65775dSViresh Kumar 19331c03a2d0SViresh Kumar /* 19341c03a2d0SViresh Kumar * Failed after setting to intermediate freq? The driver should have 19351c03a2d0SViresh Kumar * reverted back to the initial frequency and so should we. Check 19361c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 19371c03a2d0SViresh Kumar * case we haven't switched to the intermediate freq at all. 19381c03a2d0SViresh Kumar */ 19391c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 19401c03a2d0SViresh Kumar freqs.old = intermediate_freq; 19411c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 19421c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19431c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 19441c03a2d0SViresh Kumar } 19451c03a2d0SViresh Kumar } 19461c03a2d0SViresh Kumar 19478d65775dSViresh Kumar return retval; 19488d65775dSViresh Kumar } 19498d65775dSViresh Kumar 19501da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 19511da177e4SLinus Torvalds unsigned int target_freq, 19521da177e4SLinus Torvalds unsigned int relation) 19531da177e4SLinus Torvalds { 19547249924eSViresh Kumar unsigned int old_target_freq = target_freq; 19558d65775dSViresh Kumar int retval = -EINVAL; 1956c32b6b8eSAshok Raj 1957a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 1958a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 1959a7b422cdSKonrad Rzeszutek Wilk 19607249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 19617249924eSViresh Kumar if (target_freq > policy->max) 19627249924eSViresh Kumar target_freq = policy->max; 19637249924eSViresh Kumar if (target_freq < policy->min) 19647249924eSViresh Kumar target_freq = policy->min; 19657249924eSViresh Kumar 19667249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 19677249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 19685a1c0228SViresh Kumar 19699c0ebcf7SViresh Kumar /* 19709c0ebcf7SViresh Kumar * This might look like a redundant call, as we are checking it again 19719c0ebcf7SViresh Kumar * after finding the index. But it is left intentionally for cases where 19729c0ebcf7SViresh Kumar * exactly the same freq is requested again, so we can save a few function 19739c0ebcf7SViresh Kumar * calls.
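 *
 * (As an aside on 'relation': with a hypothetical table of 800000, 1000000
 * and 1200000 kHz, a 900000 kHz request resolves to 1000000 kHz under
 * CPUFREQ_RELATION_L and to 800000 kHz under CPUFREQ_RELATION_H.)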
19749c0ebcf7SViresh Kumar */ 19755a1c0228SViresh Kumar if (target_freq == policy->cur) 19765a1c0228SViresh Kumar return 0; 19775a1c0228SViresh Kumar 19781c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 19791c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 19801c03a2d0SViresh Kumar 19811c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 19821c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 19839c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 19849c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 19859c0ebcf7SViresh Kumar int index; 198690d45d17SAshok Raj 19879c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 19889c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 19899c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 19909c0ebcf7SViresh Kumar goto out; 19919c0ebcf7SViresh Kumar } 19929c0ebcf7SViresh Kumar 19939c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 19949c0ebcf7SViresh Kumar target_freq, relation, &index); 19959c0ebcf7SViresh Kumar if (unlikely(retval)) { 19969c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 19979c0ebcf7SViresh Kumar goto out; 19989c0ebcf7SViresh Kumar } 19999c0ebcf7SViresh Kumar 2000d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 20019c0ebcf7SViresh Kumar retval = 0; 2002d4019f0aSViresh Kumar goto out; 2003d4019f0aSViresh Kumar } 2004d4019f0aSViresh Kumar 20058d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 20069c0ebcf7SViresh Kumar } 20079c0ebcf7SViresh Kumar 20089c0ebcf7SViresh Kumar out: 20091da177e4SLinus Torvalds return retval; 20101da177e4SLinus Torvalds } 20111da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 20121da177e4SLinus Torvalds 20131da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 20141da177e4SLinus Torvalds unsigned int target_freq, 20151da177e4SLinus Torvalds unsigned int relation) 20161da177e4SLinus Torvalds { 2017f1829e4aSJulia Lawall int ret = -EINVAL; 20181da177e4SLinus Torvalds 2019ad7722daSviresh kumar down_write(&policy->rwsem); 20201da177e4SLinus Torvalds 20211da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 20221da177e4SLinus Torvalds 2023ad7722daSviresh kumar up_write(&policy->rwsem); 20241da177e4SLinus Torvalds 20251da177e4SLinus Torvalds return ret; 20261da177e4SLinus Torvalds } 20271da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 20281da177e4SLinus Torvalds 2029e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 2030e08f5f5bSGautham R Shenoy unsigned int event) 20311da177e4SLinus Torvalds { 2032cc993cabSDave Jones int ret; 20336afde10cSThomas Renninger 20346afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 20356afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 
20366afde10cSThomas Renninger That this is the case is already ensured in Kconfig. 20376afde10cSThomas Renninger */ 20386afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 20396afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 20406afde10cSThomas Renninger #else 20416afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 20426afde10cSThomas Renninger #endif 20431c256245SThomas Renninger 20442f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 20452f0aea93SViresh Kumar if (cpufreq_suspended) 20462f0aea93SViresh Kumar return 0; 2047cb57720bSEthan Zhao /* 2048cb57720bSEthan Zhao * The governor might not have been initialized here if an ACPI _PPC 2049cb57720bSEthan Zhao * change notification happened, so check for it. 2050cb57720bSEthan Zhao */ 2051cb57720bSEthan Zhao if (!policy->governor) 2052cb57720bSEthan Zhao return -EINVAL; 20532f0aea93SViresh Kumar 20541c256245SThomas Renninger if (policy->governor->max_transition_latency && 20551c256245SThomas Renninger policy->cpuinfo.transition_latency > 20561c256245SThomas Renninger policy->governor->max_transition_latency) { 20576afde10cSThomas Renninger if (!gov) 20586afde10cSThomas Renninger return -EINVAL; 20596afde10cSThomas Renninger else { 2060e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 2061e837f9b5SJoe Perches policy->governor->name, gov->name); 20621c256245SThomas Renninger policy->governor = gov; 20631c256245SThomas Renninger } 20646afde10cSThomas Renninger } 20651da177e4SLinus Torvalds 2066fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20671da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 20681da177e4SLinus Torvalds return -EINVAL; 20691da177e4SLinus Torvalds 20702d06d8c4SDominik Brodowski pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2071e08f5f5bSGautham R Shenoy policy->cpu, event); 207295731ebbSXiaoguang Chen 207395731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 207456d07db2SSrivatsa S.
Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2075f73d3933SViresh Kumar || (!policy->governor_enabled 2076f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 207795731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 207895731ebbSXiaoguang Chen return -EBUSY; 207995731ebbSXiaoguang Chen } 208095731ebbSXiaoguang Chen 208195731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 208295731ebbSXiaoguang Chen policy->governor_enabled = false; 208395731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 208495731ebbSXiaoguang Chen policy->governor_enabled = true; 208595731ebbSXiaoguang Chen 208695731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 208795731ebbSXiaoguang Chen 20881da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 20891da177e4SLinus Torvalds 20904d5dcc42SViresh Kumar if (!ret) { 20914d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20928e53695fSViresh Kumar policy->governor->initialized++; 20934d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 20948e53695fSViresh Kumar policy->governor->initialized--; 209595731ebbSXiaoguang Chen } else { 209695731ebbSXiaoguang Chen /* Restore original values */ 209795731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 209895731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 209995731ebbSXiaoguang Chen policy->governor_enabled = true; 210095731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 210195731ebbSXiaoguang Chen policy->governor_enabled = false; 210295731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 21034d5dcc42SViresh Kumar } 2104b394058fSViresh Kumar 2105fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2106fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 21071da177e4SLinus Torvalds module_put(policy->governor->owner); 21081da177e4SLinus Torvalds 21091da177e4SLinus Torvalds return ret; 21101da177e4SLinus Torvalds } 21111da177e4SLinus Torvalds 21121da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 21131da177e4SLinus Torvalds { 21143bcb09a3SJeremy Fitzhardinge int err; 21151da177e4SLinus Torvalds 21161da177e4SLinus Torvalds if (!governor) 21171da177e4SLinus Torvalds return -EINVAL; 21181da177e4SLinus Torvalds 2119a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2120a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2121a7b422cdSKonrad Rzeszutek Wilk 21223fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21231da177e4SLinus Torvalds 2124b394058fSViresh Kumar governor->initialized = 0; 21253bcb09a3SJeremy Fitzhardinge err = -EBUSY; 212642f91fa1SViresh Kumar if (!find_governor(governor->name)) { 21273bcb09a3SJeremy Fitzhardinge err = 0; 21281da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 21293bcb09a3SJeremy Fitzhardinge } 21301da177e4SLinus Torvalds 21313fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21323bcb09a3SJeremy Fitzhardinge return err; 21331da177e4SLinus Torvalds } 21341da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 21351da177e4SLinus Torvalds 21361da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 21371da177e4SLinus Torvalds { 213890e41bacSPrarit Bhargava int cpu; 213990e41bacSPrarit Bhargava 21401da177e4SLinus Torvalds if (!governor) 21411da177e4SLinus Torvalds return; 21421da177e4SLinus Torvalds 2143a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2144a7b422cdSKonrad Rzeszutek Wilk 
return; 2145a7b422cdSKonrad Rzeszutek Wilk 214690e41bacSPrarit Bhargava for_each_present_cpu(cpu) { 214790e41bacSPrarit Bhargava if (cpu_online(cpu)) 214890e41bacSPrarit Bhargava continue; 214990e41bacSPrarit Bhargava if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) 215090e41bacSPrarit Bhargava strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); 215190e41bacSPrarit Bhargava } 215290e41bacSPrarit Bhargava 21533fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21541da177e4SLinus Torvalds list_del(&governor->governor_list); 21553fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21561da177e4SLinus Torvalds return; 21571da177e4SLinus Torvalds } 21581da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 21591da177e4SLinus Torvalds 21601da177e4SLinus Torvalds 21611da177e4SLinus Torvalds /********************************************************************* 21621da177e4SLinus Torvalds * POLICY INTERFACE * 21631da177e4SLinus Torvalds *********************************************************************/ 21641da177e4SLinus Torvalds 21651da177e4SLinus Torvalds /** 21661da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 216729464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 216829464f28SDave Jones * is written 21691da177e4SLinus Torvalds * 21701da177e4SLinus Torvalds * Reads the current cpufreq policy. 21711da177e4SLinus Torvalds */ 21721da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 21731da177e4SLinus Torvalds { 21741da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 21751da177e4SLinus Torvalds if (!policy) 21761da177e4SLinus Torvalds return -EINVAL; 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 21791da177e4SLinus Torvalds if (!cpu_policy) 21801da177e4SLinus Torvalds return -EINVAL; 21811da177e4SLinus Torvalds 2182d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 21831da177e4SLinus Torvalds 21841da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 21851da177e4SLinus Torvalds return 0; 21861da177e4SLinus Torvalds } 21871da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 21881da177e4SLinus Torvalds 2189153d7f3fSArjan van de Ven /* 2190037ce839SViresh Kumar * policy : current policy. 2191037ce839SViresh Kumar * new_policy: policy to be set. 2192153d7f3fSArjan van de Ven */ 2193037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 21943a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 21951da177e4SLinus Torvalds { 2196d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2197d9a789c7SRafael J. Wysocki int ret; 21981da177e4SLinus Torvalds 2199e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2200e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 22011da177e4SLinus Torvalds 2202d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 22031da177e4SLinus Torvalds 2204d9a789c7SRafael J. Wysocki if (new_policy->min > policy->max || new_policy->max < policy->min) 2205d9a789c7SRafael J. Wysocki return -EINVAL; 22069c9a43edSMattia Dongili 22071da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 22083a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 22091da177e4SLinus Torvalds if (ret) 2210d9a789c7SRafael J. 
Wysocki return ret; 22111da177e4SLinus Torvalds 22121da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2213e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22143a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 22151da177e4SLinus Torvalds 22161da177e4SLinus Torvalds /* adjust if necessary - hardware incompatibility*/ 2217e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22183a3e9e06SViresh Kumar CPUFREQ_INCOMPATIBLE, new_policy); 22191da177e4SLinus Torvalds 2220bb176f7dSViresh Kumar /* 2221bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2222bb176f7dSViresh Kumar * different to the first one 2223bb176f7dSViresh Kumar */ 22243a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2225e041c683SAlan Stern if (ret) 2226d9a789c7SRafael J. Wysocki return ret; 22271da177e4SLinus Torvalds 22281da177e4SLinus Torvalds /* notification of the new policy */ 2229e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22303a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 22311da177e4SLinus Torvalds 22323a3e9e06SViresh Kumar policy->min = new_policy->min; 22333a3e9e06SViresh Kumar policy->max = new_policy->max; 22341da177e4SLinus Torvalds 22352d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 22363a3e9e06SViresh Kumar policy->min, policy->max); 22371da177e4SLinus Torvalds 22381c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 22393a3e9e06SViresh Kumar policy->policy = new_policy->policy; 22402d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2241d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2242d9a789c7SRafael J. Wysocki } 2243d9a789c7SRafael J. Wysocki 2244d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2245d9a789c7SRafael J. Wysocki goto out; 22461da177e4SLinus Torvalds 22472d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 22481da177e4SLinus Torvalds 2249d9a789c7SRafael J. Wysocki /* save old, working values */ 2250d9a789c7SRafael J. Wysocki old_gov = policy->governor; 22511da177e4SLinus Torvalds /* end old governor */ 2252d9a789c7SRafael J. Wysocki if (old_gov) { 22533a3e9e06SViresh Kumar __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 2254ad7722daSviresh kumar up_write(&policy->rwsem); 2255d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2256ad7722daSviresh kumar down_write(&policy->rwsem); 22577bd353a9SViresh Kumar } 22581da177e4SLinus Torvalds 22591da177e4SLinus Torvalds /* start new governor */ 22603a3e9e06SViresh Kumar policy->governor = new_policy->governor; 22613a3e9e06SViresh Kumar if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { 2262d9a789c7SRafael J. Wysocki if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) 2263d9a789c7SRafael J. Wysocki goto out; 2264d9a789c7SRafael J. Wysocki 2265ad7722daSviresh kumar up_write(&policy->rwsem); 2266d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2267ad7722daSviresh kumar down_write(&policy->rwsem); 2268955ef483SViresh Kumar } 22697bd353a9SViresh Kumar 22701da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2271d9a789c7SRafael J. Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 22721da177e4SLinus Torvalds if (old_gov) { 22733a3e9e06SViresh Kumar policy->governor = old_gov; 2274d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 2275d9a789c7SRafael J. 
Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 22761da177e4SLinus Torvalds } 22771da177e4SLinus Torvalds 2278d9a789c7SRafael J. Wysocki return -EINVAL; 2279d9a789c7SRafael J. Wysocki 2280d9a789c7SRafael J. Wysocki out: 2281d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2282d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 22831da177e4SLinus Torvalds } 22841da177e4SLinus Torvalds 22851da177e4SLinus Torvalds /** 22861da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 22871da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 22881da177e4SLinus Torvalds * 228925985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 22901da177e4SLinus Torvalds * at different times. 22911da177e4SLinus Torvalds */ 22921da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 22931da177e4SLinus Torvalds { 22943a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 22953a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2296f1829e4aSJulia Lawall int ret; 22971da177e4SLinus Torvalds 2298fefa8ff8SAaron Plattner if (!policy) 2299fefa8ff8SAaron Plattner return -ENODEV; 23001da177e4SLinus Torvalds 2301ad7722daSviresh kumar down_write(&policy->rwsem); 23021da177e4SLinus Torvalds 23032d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2304d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 23053a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 23063a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 23073a3e9e06SViresh Kumar new_policy.policy = policy->user_policy.policy; 23083a3e9e06SViresh Kumar new_policy.governor = policy->user_policy.governor; 23091da177e4SLinus Torvalds 2310bb176f7dSViresh Kumar /* 2311bb176f7dSViresh Kumar * BIOS might change freq behind our back 2312bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2313bb176f7dSViresh Kumar */ 23142ed99e39SRafael J. 
Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 23153a3e9e06SViresh Kumar new_policy.cur = cpufreq_driver->get(cpu); 2316bd0fa9bbSViresh Kumar if (WARN_ON(!new_policy.cur)) { 2317bd0fa9bbSViresh Kumar ret = -EIO; 2318fefa8ff8SAaron Plattner goto unlock; 2319bd0fa9bbSViresh Kumar } 2320bd0fa9bbSViresh Kumar 23213a3e9e06SViresh Kumar if (!policy->cur) { 2322e837f9b5SJoe Perches pr_debug("Driver did not initialize current freq\n"); 23233a3e9e06SViresh Kumar policy->cur = new_policy.cur; 2324a85f7bd3SThomas Renninger } else { 23259c0ebcf7SViresh Kumar if (policy->cur != new_policy.cur && has_target()) 2326a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, new_policy.cur); 23270961dd0dSThomas Renninger } 2328a85f7bd3SThomas Renninger } 23290961dd0dSThomas Renninger 2330037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 23311da177e4SLinus Torvalds 2332fefa8ff8SAaron Plattner unlock: 2333ad7722daSviresh kumar up_write(&policy->rwsem); 23345a01f2e8SVenkatesh Pallipadi 23353a3e9e06SViresh Kumar cpufreq_cpu_put(policy); 23361da177e4SLinus Torvalds return ret; 23371da177e4SLinus Torvalds } 23381da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy); 23391da177e4SLinus Torvalds 23402760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb, 2341c32b6b8eSAshok Raj unsigned long action, void *hcpu) 2342c32b6b8eSAshok Raj { 2343c32b6b8eSAshok Raj unsigned int cpu = (unsigned long)hcpu; 23448a25a2fdSKay Sievers struct device *dev; 2345c32b6b8eSAshok Raj 23468a25a2fdSKay Sievers dev = get_cpu_device(cpu); 23478a25a2fdSKay Sievers if (dev) { 23485302c3fbSSrivatsa S. Bhat switch (action & ~CPU_TASKS_FROZEN) { 2349c32b6b8eSAshok Raj case CPU_ONLINE: 235023faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2351c32b6b8eSAshok Raj break; 23525302c3fbSSrivatsa S. Bhat 2353c32b6b8eSAshok Raj case CPU_DOWN_PREPARE: 235496bbbe4aSViresh Kumar __cpufreq_remove_dev_prepare(dev, NULL); 23551aee40acSSrivatsa S. Bhat break; 23561aee40acSSrivatsa S. Bhat 23571aee40acSSrivatsa S. Bhat case CPU_POST_DEAD: 235896bbbe4aSViresh Kumar __cpufreq_remove_dev_finish(dev, NULL); 2359c32b6b8eSAshok Raj break; 23605302c3fbSSrivatsa S. 
Bhat 23615a01f2e8SVenkatesh Pallipadi case CPU_DOWN_FAILED: 236223faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2363c32b6b8eSAshok Raj break; 2364c32b6b8eSAshok Raj } 2365c32b6b8eSAshok Raj } 2366c32b6b8eSAshok Raj return NOTIFY_OK; 2367c32b6b8eSAshok Raj } 2368c32b6b8eSAshok Raj 23699c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = { 2370c32b6b8eSAshok Raj .notifier_call = cpufreq_cpu_callback, 2371c32b6b8eSAshok Raj }; 23721da177e4SLinus Torvalds 23731da177e4SLinus Torvalds /********************************************************************* 23746f19efc0SLukasz Majewski * BOOST * 23756f19efc0SLukasz Majewski *********************************************************************/ 23766f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state) 23776f19efc0SLukasz Majewski { 23786f19efc0SLukasz Majewski struct cpufreq_frequency_table *freq_table; 23796f19efc0SLukasz Majewski struct cpufreq_policy *policy; 23806f19efc0SLukasz Majewski int ret = -EINVAL; 23816f19efc0SLukasz Majewski 2382f963735aSViresh Kumar for_each_active_policy(policy) { 23836f19efc0SLukasz Majewski freq_table = cpufreq_frequency_get_table(policy->cpu); 23846f19efc0SLukasz Majewski if (freq_table) { 23856f19efc0SLukasz Majewski ret = cpufreq_frequency_table_cpuinfo(policy, 23866f19efc0SLukasz Majewski freq_table); 23876f19efc0SLukasz Majewski if (ret) { 23886f19efc0SLukasz Majewski pr_err("%s: Policy frequency update failed\n", 23896f19efc0SLukasz Majewski __func__); 23906f19efc0SLukasz Majewski break; 23916f19efc0SLukasz Majewski } 23926f19efc0SLukasz Majewski policy->user_policy.max = policy->max; 23936f19efc0SLukasz Majewski __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 23946f19efc0SLukasz Majewski } 23956f19efc0SLukasz Majewski } 23966f19efc0SLukasz Majewski 23976f19efc0SLukasz Majewski return ret; 23986f19efc0SLukasz Majewski } 23996f19efc0SLukasz Majewski 24006f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state) 24016f19efc0SLukasz Majewski { 24026f19efc0SLukasz Majewski unsigned long flags; 24036f19efc0SLukasz Majewski int ret = 0; 24046f19efc0SLukasz Majewski 24056f19efc0SLukasz Majewski if (cpufreq_driver->boost_enabled == state) 24066f19efc0SLukasz Majewski return 0; 24076f19efc0SLukasz Majewski 24086f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24096f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = state; 24106f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24116f19efc0SLukasz Majewski 24126f19efc0SLukasz Majewski ret = cpufreq_driver->set_boost(state); 24136f19efc0SLukasz Majewski if (ret) { 24146f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24156f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = !state; 24166f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24176f19efc0SLukasz Majewski 2418e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST\n", 2419e837f9b5SJoe Perches __func__, state ? 
"enable" : "disable"); 24206f19efc0SLukasz Majewski } 24216f19efc0SLukasz Majewski 24226f19efc0SLukasz Majewski return ret; 24236f19efc0SLukasz Majewski } 24246f19efc0SLukasz Majewski 24256f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 24266f19efc0SLukasz Majewski { 24276f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 24286f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 24296f19efc0SLukasz Majewski 24306f19efc0SLukasz Majewski return 0; 24316f19efc0SLukasz Majewski } 24326f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 24336f19efc0SLukasz Majewski 24346f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 24356f19efc0SLukasz Majewski { 24366f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 24376f19efc0SLukasz Majewski } 24386f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 24396f19efc0SLukasz Majewski 24406f19efc0SLukasz Majewski /********************************************************************* 24411da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 24421da177e4SLinus Torvalds *********************************************************************/ 24431da177e4SLinus Torvalds 24441da177e4SLinus Torvalds /** 24451da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 24461da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 24471da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 24481da177e4SLinus Torvalds * 24491da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 24501da177e4SLinus Torvalds * returns zero on success, -EBUSY when another driver got here first 24511da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 24521da177e4SLinus Torvalds * 24531da177e4SLinus Torvalds */ 2454221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 24551da177e4SLinus Torvalds { 24561da177e4SLinus Torvalds unsigned long flags; 24571da177e4SLinus Torvalds int ret; 24581da177e4SLinus Torvalds 2459a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2460a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2461a7b422cdSKonrad Rzeszutek Wilk 24621da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 24639c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 24649832235fSRafael J. Wysocki driver_data->target) || 24659832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 24661c03a2d0SViresh Kumar driver_data->target)) || 24671c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 24681da177e4SLinus Torvalds return -EINVAL; 24691da177e4SLinus Torvalds 24702d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 24711da177e4SLinus Torvalds 24720d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 24731c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 24740d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24754dea5806SYinghai Lu return -EEXIST; 24761da177e4SLinus Torvalds } 24771c3d85ddSRafael J. 
Wysocki cpufreq_driver = driver_data; 24780d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24791da177e4SLinus Torvalds 2480bc68b7dfSViresh Kumar if (driver_data->setpolicy) 2481bc68b7dfSViresh Kumar driver_data->flags |= CPUFREQ_CONST_LOOPS; 2482bc68b7dfSViresh Kumar 24836f19efc0SLukasz Majewski if (cpufreq_boost_supported()) { 24846f19efc0SLukasz Majewski /* 24856f19efc0SLukasz Majewski * Check if driver provides function to enable boost - 24866f19efc0SLukasz Majewski * if not, use cpufreq_boost_set_sw as default 24876f19efc0SLukasz Majewski */ 24886f19efc0SLukasz Majewski if (!cpufreq_driver->set_boost) 24896f19efc0SLukasz Majewski cpufreq_driver->set_boost = cpufreq_boost_set_sw; 24906f19efc0SLukasz Majewski 24916f19efc0SLukasz Majewski ret = cpufreq_sysfs_create_file(&boost.attr); 24926f19efc0SLukasz Majewski if (ret) { 24936f19efc0SLukasz Majewski pr_err("%s: cannot register global BOOST sysfs file\n", 24946f19efc0SLukasz Majewski __func__); 24956f19efc0SLukasz Majewski goto err_null_driver; 24966f19efc0SLukasz Majewski } 24976f19efc0SLukasz Majewski } 24986f19efc0SLukasz Majewski 24998a25a2fdSKay Sievers ret = subsys_interface_register(&cpufreq_interface); 25008f5bc2abSJiri Slaby if (ret) 25016f19efc0SLukasz Majewski goto err_boost_unreg; 25021da177e4SLinus Torvalds 2503ce1bcfe9SViresh Kumar if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2504ce1bcfe9SViresh Kumar list_empty(&cpufreq_policy_list)) { 25051da177e4SLinus Torvalds /* if all ->init() calls failed, unregister */ 2506ce1bcfe9SViresh Kumar pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2507e08f5f5bSGautham R Shenoy driver_data->name); 25088a25a2fdSKay Sievers goto err_if_unreg; 25091da177e4SLinus Torvalds } 25101da177e4SLinus Torvalds 251165edc68cSChandra Seetharaman register_hotcpu_notifier(&cpufreq_cpu_notifier); 25122d06d8c4SDominik Brodowski pr_debug("driver %s up and running\n", driver_data->name); 25131da177e4SLinus Torvalds 25148f5bc2abSJiri Slaby return 0; 25158a25a2fdSKay Sievers err_if_unreg: 25168a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25176f19efc0SLukasz Majewski err_boost_unreg: 25186f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25196f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25208f5bc2abSJiri Slaby err_null_driver: 25210d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25221c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25230d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25244d34a67dSDave Jones return ret; 25251da177e4SLinus Torvalds } 25261da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver); 25271da177e4SLinus Torvalds 25281da177e4SLinus Torvalds /** 25291da177e4SLinus Torvalds * cpufreq_unregister_driver - unregister the current CPUFreq driver 25301da177e4SLinus Torvalds * 25311da177e4SLinus Torvalds * Unregister the current CPUFreq driver. Only call this if you have 25321da177e4SLinus Torvalds * the right to do so, i.e. if you have succeeded in initialising before! 25331da177e4SLinus Torvalds * Returns zero if successful, and -EINVAL if the cpufreq_driver is 25341da177e4SLinus Torvalds * currently not initialised. 25351da177e4SLinus Torvalds */ 2536221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver) 25371da177e4SLinus Torvalds { 25381da177e4SLinus Torvalds unsigned long flags; 25391da177e4SLinus Torvalds 25401c3d85ddSRafael J. 
Wysocki if (!cpufreq_driver || (driver != cpufreq_driver)) 25411da177e4SLinus Torvalds return -EINVAL; 25421da177e4SLinus Torvalds 25432d06d8c4SDominik Brodowski pr_debug("unregistering driver %s\n", driver->name); 25441da177e4SLinus Torvalds 25458a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25466f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25476f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25486f19efc0SLukasz Majewski 254965edc68cSChandra Seetharaman unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 25501da177e4SLinus Torvalds 25516eed9404SViresh Kumar down_write(&cpufreq_rwsem); 25520d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25536eed9404SViresh Kumar 25541c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25556eed9404SViresh Kumar 25560d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25576eed9404SViresh Kumar up_write(&cpufreq_rwsem); 25581da177e4SLinus Torvalds 25591da177e4SLinus Torvalds return 0; 25601da177e4SLinus Torvalds } 25611da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 25625a01f2e8SVenkatesh Pallipadi 256390de2a4aSDoug Anderson /* 256490de2a4aSDoug Anderson * Stop cpufreq at shutdown to make sure it isn't holding any locks 256590de2a4aSDoug Anderson * or mutexes when secondary CPUs are halted. 256690de2a4aSDoug Anderson */ 256790de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 256890de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 256990de2a4aSDoug Anderson }; 257090de2a4aSDoug Anderson 25715a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 25725a01f2e8SVenkatesh Pallipadi { 2573a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2574a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2575a7b422cdSKonrad Rzeszutek Wilk 25762361be23SViresh Kumar cpufreq_global_kobject = kobject_create(); 25778aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 25788aa84ad8SThomas Renninger 257990de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 258090de2a4aSDoug Anderson 25815a01f2e8SVenkatesh Pallipadi return 0; 25825a01f2e8SVenkatesh Pallipadi } 25835a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2584
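/*
 * Illustrative sketch (not part of the original source): how in-kernel
 * code typically consumes the POLICY INTERFACE above. cpufreq_get_policy()
 * copies a snapshot of a CPU's policy, and cpufreq_update_policy() asks
 * the core to re-evaluate limits (ending up in cpufreq_set_policy())
 * after something external, e.g. a platform or thermal driver, has
 * changed its constraints. The helper name sample_show_policy_limits()
 * is hypothetical.
 */
#if 0	/* illustration only, not built */
static void sample_show_policy_limits(unsigned int cpu)
{
	struct cpufreq_policy policy;

	/* Fails with -EINVAL when no policy exists for this CPU. */
	if (cpufreq_get_policy(&policy, cpu))
		return;

	pr_info("cpu%u: %u - %u kHz, governor %s\n", cpu,
		policy.min, policy.max,
		policy.governor ? policy.governor->name : "none");

	/* Ask the core to re-apply the user limits and notify the governor. */
	cpufreq_update_policy(cpu);
}
#endif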
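/*
 * Illustrative sketch (not part of the original source): the BOOST
 * section above. A driver advertises boost capability through
 * ->boost_supported; if it supplies no ->set_boost callback,
 * cpufreq_register_driver() falls back to cpufreq_boost_set_sw(), which
 * re-evaluates every active policy against its frequency table.
 * sample_enable_boost() is a hypothetical helper; the global sysfs
 * "boost" file exposes the same switch.
 */
#if 0	/* illustration only, not built */
static int sample_enable_boost(void)
{
	if (!cpufreq_boost_supported())
		return -EINVAL;

	/*
	 * No-op if boost is already enabled; otherwise flips the driver's
	 * boost state and reports failure if the driver rejects it.
	 */
	return cpufreq_boost_trigger_state(1);
}
#endif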
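/*
 * Illustrative sketch (not part of the original source): the minimal
 * callback set that cpufreq_register_driver() accepts, as it would
 * appear in a separate driver module. ->verify and ->init are mandatory,
 * plus either ->setpolicy or ->target/->target_index (but not both),
 * and ->get_intermediate only together with ->target_intermediate.
 * All sample_* names are hypothetical and the callbacks are stubs.
 */
#if 0	/* illustration only, not built */
static int sample_verify(struct cpufreq_policy *policy)
{
	/* Clamp the requested range to the CPU's hardware limits. */
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

static int sample_init(struct cpufreq_policy *policy)
{
	/* A real driver fills in policy->cpuinfo and its frequency table. */
	return 0;
}

static int sample_target_index(struct cpufreq_policy *policy,
			       unsigned int index)
{
	/* A real driver programs the hardware for freq_table[index]. */
	return 0;
}

static struct cpufreq_driver sample_driver = {
	.name		= "sample",
	.verify		= sample_verify,
	.init		= sample_init,
	.target_index	= sample_target_index,
};

static int __init sample_module_init(void)
{
	/* Returns -EEXIST when another cpufreq driver is already registered. */
	return cpufreq_register_driver(&sample_driver);
}

static void __exit sample_module_exit(void)
{
	cpufreq_unregister_driver(&sample_driver);
}

module_init(sample_module_init);
module_exit(sample_module_exit);
#endif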