11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/drivers/cpufreq/cpufreq.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001 Russell King 51da177e4SLinus Torvalds * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 6bb176f7dSViresh Kumar * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org> 71da177e4SLinus Torvalds * 8c32b6b8eSAshok Raj * Oct 2005 - Ashok Raj <ashok.raj@intel.com> 9c32b6b8eSAshok Raj * Added handling for CPU hotplug 108ff69732SDave Jones * Feb 2006 - Jacob Shin <jacob.shin@amd.com> 118ff69732SDave Jones * Fix handling for CPU hotplug -- affected CPUs 12c32b6b8eSAshok Raj * 131da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 141da177e4SLinus Torvalds * it under the terms of the GNU General Public License version 2 as 151da177e4SLinus Torvalds * published by the Free Software Foundation. 161da177e4SLinus Torvalds */ 171da177e4SLinus Torvalds 18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19db701151SViresh Kumar 205ff0a268SViresh Kumar #include <linux/cpu.h> 211da177e4SLinus Torvalds #include <linux/cpufreq.h> 221da177e4SLinus Torvalds #include <linux/delay.h> 231da177e4SLinus Torvalds #include <linux/device.h> 245ff0a268SViresh Kumar #include <linux/init.h> 255ff0a268SViresh Kumar #include <linux/kernel_stat.h> 265ff0a268SViresh Kumar #include <linux/module.h> 273fc54d37Sakpm@osdl.org #include <linux/mutex.h> 285ff0a268SViresh Kumar #include <linux/slab.h> 292f0aea93SViresh Kumar #include <linux/suspend.h> 3090de2a4aSDoug Anderson #include <linux/syscore_ops.h> 315ff0a268SViresh Kumar #include <linux/tick.h> 326f4f2723SThomas Renninger #include <trace/events/power.h> 336f4f2723SThomas Renninger 34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list); 35f963735aSViresh Kumar 36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy) 37f963735aSViresh Kumar { 38f963735aSViresh Kumar return 
cpumask_empty(policy->cpus); 39f963735aSViresh Kumar } 40f963735aSViresh Kumar 41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active) 42f963735aSViresh Kumar { 43f963735aSViresh Kumar return active == !policy_is_inactive(policy); 44f963735aSViresh Kumar } 45f963735aSViresh Kumar 46f963735aSViresh Kumar /* Finds Next Acive/Inactive policy */ 47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy, 48f963735aSViresh Kumar bool active) 49f963735aSViresh Kumar { 50f963735aSViresh Kumar do { 51f963735aSViresh Kumar policy = list_next_entry(policy, policy_list); 52f963735aSViresh Kumar 53f963735aSViresh Kumar /* No more policies in the list */ 54f963735aSViresh Kumar if (&policy->policy_list == &cpufreq_policy_list) 55f963735aSViresh Kumar return NULL; 56f963735aSViresh Kumar } while (!suitable_policy(policy, active)); 57f963735aSViresh Kumar 58f963735aSViresh Kumar return policy; 59f963735aSViresh Kumar } 60f963735aSViresh Kumar 61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active) 62f963735aSViresh Kumar { 63f963735aSViresh Kumar struct cpufreq_policy *policy; 64f963735aSViresh Kumar 65f963735aSViresh Kumar /* No policies in the list */ 66f963735aSViresh Kumar if (list_empty(&cpufreq_policy_list)) 67f963735aSViresh Kumar return NULL; 68f963735aSViresh Kumar 69f963735aSViresh Kumar policy = list_first_entry(&cpufreq_policy_list, typeof(*policy), 70f963735aSViresh Kumar policy_list); 71f963735aSViresh Kumar 72f963735aSViresh Kumar if (!suitable_policy(policy, active)) 73f963735aSViresh Kumar policy = next_policy(policy, active); 74f963735aSViresh Kumar 75f963735aSViresh Kumar return policy; 76f963735aSViresh Kumar } 77f963735aSViresh Kumar 78f963735aSViresh Kumar /* Macros to iterate over CPU policies */ 79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active) \ 80f963735aSViresh Kumar for (__policy = first_policy(__active); \ 81f963735aSViresh 
Kumar __policy; \ 82f963735aSViresh Kumar __policy = next_policy(__policy, __active)) 83f963735aSViresh Kumar 84f963735aSViresh Kumar #define for_each_active_policy(__policy) \ 85f963735aSViresh Kumar for_each_suitable_policy(__policy, true) 86f963735aSViresh Kumar #define for_each_inactive_policy(__policy) \ 87f963735aSViresh Kumar for_each_suitable_policy(__policy, false) 88f963735aSViresh Kumar 89b4f0676fSViresh Kumar #define for_each_policy(__policy) \ 90b4f0676fSViresh Kumar list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) 91b4f0676fSViresh Kumar 92f7b27061SViresh Kumar /* Iterate over governors */ 93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list); 94f7b27061SViresh Kumar #define for_each_governor(__governor) \ 95f7b27061SViresh Kumar list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) 96f7b27061SViresh Kumar 971da177e4SLinus Torvalds /** 98cd878479SDave Jones * The "cpufreq driver" - the arch- or hardware-dependent low 991da177e4SLinus Torvalds * level driver of CPUFreq support, and its spinlock. This lock 1001da177e4SLinus Torvalds * also protects the cpufreq_cpu_data array. 1011da177e4SLinus Torvalds */ 1021c3d85ddSRafael J. 
Wysocki static struct cpufreq_driver *cpufreq_driver; 1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock); 1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock); 106bb176f7dSViresh Kumar 1072f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */ 1082f0aea93SViresh Kumar static bool cpufreq_suspended; 1091da177e4SLinus Torvalds 1109c0ebcf7SViresh Kumar static inline bool has_target(void) 1119c0ebcf7SViresh Kumar { 1129c0ebcf7SViresh Kumar return cpufreq_driver->target_index || cpufreq_driver->target; 1139c0ebcf7SViresh Kumar } 1149c0ebcf7SViresh Kumar 1151da177e4SLinus Torvalds /* internal prototypes */ 11629464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy, 11729464f28SDave Jones unsigned int event); 118d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 11965f27f38SDavid Howells static void handle_update(struct work_struct *work); 1201da177e4SLinus Torvalds 1211da177e4SLinus Torvalds /** 1221da177e4SLinus Torvalds * Two notifier lists: the "policy" list is involved in the 1231da177e4SLinus Torvalds * validation process for a new CPU frequency policy; the 1241da177e4SLinus Torvalds * "transition" list for kernel code that needs to handle 1251da177e4SLinus Torvalds * changes to devices when the CPU clock speed changes. 1261da177e4SLinus Torvalds * The mutex locks both lists. 
1271da177e4SLinus Torvalds */ 128e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); 129b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list; 1301da177e4SLinus Torvalds 13174212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called; 132b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void) 133b4dfdbb3SAlan Stern { 134b4dfdbb3SAlan Stern srcu_init_notifier_head(&cpufreq_transition_notifier_list); 13574212ca4SCesar Eduardo Barros init_cpufreq_transition_notifier_list_called = true; 136b4dfdbb3SAlan Stern return 0; 137b4dfdbb3SAlan Stern } 138b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list); 1391da177e4SLinus Torvalds 140a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly; 141da584455SViresh Kumar static int cpufreq_disabled(void) 142a7b422cdSKonrad Rzeszutek Wilk { 143a7b422cdSKonrad Rzeszutek Wilk return off; 144a7b422cdSKonrad Rzeszutek Wilk } 145a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void) 146a7b422cdSKonrad Rzeszutek Wilk { 147a7b422cdSKonrad Rzeszutek Wilk off = 1; 148a7b422cdSKonrad Rzeszutek Wilk } 1493fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex); 1501da177e4SLinus Torvalds 1514d5dcc42SViresh Kumar bool have_governor_per_policy(void) 1524d5dcc42SViresh Kumar { 1530b981e70SViresh Kumar return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); 1544d5dcc42SViresh Kumar } 1553f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy); 1564d5dcc42SViresh Kumar 157944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) 158944e9a03SViresh Kumar { 159944e9a03SViresh Kumar if (have_governor_per_policy()) 160944e9a03SViresh Kumar return &policy->kobj; 161944e9a03SViresh Kumar else 162944e9a03SViresh Kumar return cpufreq_global_kobject; 163944e9a03SViresh Kumar } 164944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 
165944e9a03SViresh Kumar 1665a31d594SViresh Kumar struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 1675a31d594SViresh Kumar { 1685a31d594SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1695a31d594SViresh Kumar 1705a31d594SViresh Kumar return policy && !policy_is_inactive(policy) ? 1715a31d594SViresh Kumar policy->freq_table : NULL; 1725a31d594SViresh Kumar } 1735a31d594SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 1745a31d594SViresh Kumar 17572a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 17672a4ce34SViresh Kumar { 17772a4ce34SViresh Kumar u64 idle_time; 17872a4ce34SViresh Kumar u64 cur_wall_time; 17972a4ce34SViresh Kumar u64 busy_time; 18072a4ce34SViresh Kumar 18172a4ce34SViresh Kumar cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); 18272a4ce34SViresh Kumar 18372a4ce34SViresh Kumar busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; 18472a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; 18572a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; 18672a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; 18772a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; 18872a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; 18972a4ce34SViresh Kumar 19072a4ce34SViresh Kumar idle_time = cur_wall_time - busy_time; 19172a4ce34SViresh Kumar if (wall) 19272a4ce34SViresh Kumar *wall = cputime_to_usecs(cur_wall_time); 19372a4ce34SViresh Kumar 19472a4ce34SViresh Kumar return cputime_to_usecs(idle_time); 19572a4ce34SViresh Kumar } 19672a4ce34SViresh Kumar 19772a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) 19872a4ce34SViresh Kumar { 19972a4ce34SViresh Kumar u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? 
wall : NULL); 20072a4ce34SViresh Kumar 20172a4ce34SViresh Kumar if (idle_time == -1ULL) 20272a4ce34SViresh Kumar return get_cpu_idle_time_jiffy(cpu, wall); 20372a4ce34SViresh Kumar else if (!io_busy) 20472a4ce34SViresh Kumar idle_time += get_cpu_iowait_time_us(cpu, wall); 20572a4ce34SViresh Kumar 20672a4ce34SViresh Kumar return idle_time; 20772a4ce34SViresh Kumar } 20872a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time); 20972a4ce34SViresh Kumar 21070e9e778SViresh Kumar /* 21170e9e778SViresh Kumar * This is a generic cpufreq init() routine which can be used by cpufreq 21270e9e778SViresh Kumar * drivers of SMP systems. It will do following: 21370e9e778SViresh Kumar * - validate & show freq table passed 21470e9e778SViresh Kumar * - set policies transition latency 21570e9e778SViresh Kumar * - policy->cpus with all possible CPUs 21670e9e778SViresh Kumar */ 21770e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy, 21870e9e778SViresh Kumar struct cpufreq_frequency_table *table, 21970e9e778SViresh Kumar unsigned int transition_latency) 22070e9e778SViresh Kumar { 22170e9e778SViresh Kumar int ret; 22270e9e778SViresh Kumar 22370e9e778SViresh Kumar ret = cpufreq_table_validate_and_show(policy, table); 22470e9e778SViresh Kumar if (ret) { 22570e9e778SViresh Kumar pr_err("%s: invalid frequency table: %d\n", __func__, ret); 22670e9e778SViresh Kumar return ret; 22770e9e778SViresh Kumar } 22870e9e778SViresh Kumar 22970e9e778SViresh Kumar policy->cpuinfo.transition_latency = transition_latency; 23070e9e778SViresh Kumar 23170e9e778SViresh Kumar /* 23258405af6SShailendra Verma * The driver only supports the SMP configuration where all processors 23370e9e778SViresh Kumar * share the clock and voltage and clock. 
23470e9e778SViresh Kumar */ 23570e9e778SViresh Kumar cpumask_setall(policy->cpus); 23670e9e778SViresh Kumar 23770e9e778SViresh Kumar return 0; 23870e9e778SViresh Kumar } 23970e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init); 24070e9e778SViresh Kumar 241988bed09SViresh Kumar /* Only for cpufreq core internal use */ 242988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) 243652ed95dSViresh Kumar { 244652ed95dSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 245652ed95dSViresh Kumar 246988bed09SViresh Kumar return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; 247988bed09SViresh Kumar } 248988bed09SViresh Kumar 249988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu) 250988bed09SViresh Kumar { 251988bed09SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); 252988bed09SViresh Kumar 253652ed95dSViresh Kumar if (!policy || IS_ERR(policy->clk)) { 254e837f9b5SJoe Perches pr_err("%s: No %s associated to cpu: %d\n", 255e837f9b5SJoe Perches __func__, policy ? "clk" : "policy", cpu); 256652ed95dSViresh Kumar return 0; 257652ed95dSViresh Kumar } 258652ed95dSViresh Kumar 259652ed95dSViresh Kumar return clk_get_rate(policy->clk) / 1000; 260652ed95dSViresh Kumar } 261652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get); 262652ed95dSViresh Kumar 26350e9c852SViresh Kumar /** 26450e9c852SViresh Kumar * cpufreq_cpu_get: returns policy for a cpu and marks it busy. 26550e9c852SViresh Kumar * 26650e9c852SViresh Kumar * @cpu: cpu to find policy for. 26750e9c852SViresh Kumar * 26850e9c852SViresh Kumar * This returns policy for 'cpu', returns NULL if it doesn't exist. 26950e9c852SViresh Kumar * It also increments the kobject reference count to mark it busy and so would 27050e9c852SViresh Kumar * require a corresponding call to cpufreq_cpu_put() to decrement it back. 
27150e9c852SViresh Kumar * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be 27250e9c852SViresh Kumar * freed as that depends on the kobj count. 27350e9c852SViresh Kumar * 27450e9c852SViresh Kumar * Return: A valid policy on success, otherwise NULL on failure. 27550e9c852SViresh Kumar */ 2766eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 2771da177e4SLinus Torvalds { 2786eed9404SViresh Kumar struct cpufreq_policy *policy = NULL; 2791da177e4SLinus Torvalds unsigned long flags; 2801da177e4SLinus Torvalds 2811b947c90SViresh Kumar if (WARN_ON(cpu >= nr_cpu_ids)) 2826eed9404SViresh Kumar return NULL; 2836eed9404SViresh Kumar 2841da177e4SLinus Torvalds /* get the cpufreq driver */ 2850d1857a1SNathan Zimmer read_lock_irqsave(&cpufreq_driver_lock, flags); 2861da177e4SLinus Torvalds 2876eed9404SViresh Kumar if (cpufreq_driver) { 2881da177e4SLinus Torvalds /* get the CPU */ 289988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 2906eed9404SViresh Kumar if (policy) 2916eed9404SViresh Kumar kobject_get(&policy->kobj); 2926eed9404SViresh Kumar } 2936eed9404SViresh Kumar 2946eed9404SViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 2951da177e4SLinus Torvalds 2963a3e9e06SViresh Kumar return policy; 297a9144436SStephen Boyd } 2981da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 2991da177e4SLinus Torvalds 30050e9c852SViresh Kumar /** 30150e9c852SViresh Kumar * cpufreq_cpu_put: Decrements the usage count of a policy 30250e9c852SViresh Kumar * 30350e9c852SViresh Kumar * @policy: policy earlier returned by cpufreq_cpu_get(). 30450e9c852SViresh Kumar * 30550e9c852SViresh Kumar * This decrements the kobject reference count incremented earlier by calling 30650e9c852SViresh Kumar * cpufreq_cpu_get(). 
30750e9c852SViresh Kumar */ 3083a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy) 309a9144436SStephen Boyd { 3106eed9404SViresh Kumar kobject_put(&policy->kobj); 311a9144436SStephen Boyd } 3121da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 3131da177e4SLinus Torvalds 3141da177e4SLinus Torvalds /********************************************************************* 3151da177e4SLinus Torvalds * EXTERNALLY AFFECTING FREQUENCY CHANGES * 3161da177e4SLinus Torvalds *********************************************************************/ 3171da177e4SLinus Torvalds 3181da177e4SLinus Torvalds /** 3191da177e4SLinus Torvalds * adjust_jiffies - adjust the system "loops_per_jiffy" 3201da177e4SLinus Torvalds * 3211da177e4SLinus Torvalds * This function alters the system "loops_per_jiffy" for the clock 3221da177e4SLinus Torvalds * speed change. Note that loops_per_jiffy cannot be updated on SMP 3231da177e4SLinus Torvalds * systems as each CPU might be scaled differently. So, use the arch 3241da177e4SLinus Torvalds * per-CPU loops_per_jiffy value wherever possible. 
3251da177e4SLinus Torvalds */ 32639c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 32739c132eeSViresh Kumar { 3281da177e4SLinus Torvalds #ifndef CONFIG_SMP 3291da177e4SLinus Torvalds static unsigned long l_p_j_ref; 3301da177e4SLinus Torvalds static unsigned int l_p_j_ref_freq; 3311da177e4SLinus Torvalds 3321da177e4SLinus Torvalds if (ci->flags & CPUFREQ_CONST_LOOPS) 3331da177e4SLinus Torvalds return; 3341da177e4SLinus Torvalds 3351da177e4SLinus Torvalds if (!l_p_j_ref_freq) { 3361da177e4SLinus Torvalds l_p_j_ref = loops_per_jiffy; 3371da177e4SLinus Torvalds l_p_j_ref_freq = ci->old; 338e837f9b5SJoe Perches pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", 339e837f9b5SJoe Perches l_p_j_ref, l_p_j_ref_freq); 3401da177e4SLinus Torvalds } 3410b443eadSViresh Kumar if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { 342e08f5f5bSGautham R Shenoy loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, 343e08f5f5bSGautham R Shenoy ci->new); 344e837f9b5SJoe Perches pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", 345e837f9b5SJoe Perches loops_per_jiffy, ci->new); 3461da177e4SLinus Torvalds } 3471da177e4SLinus Torvalds #endif 34839c132eeSViresh Kumar } 3491da177e4SLinus Torvalds 3500956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy, 351b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 3521da177e4SLinus Torvalds { 3531da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 3541da177e4SLinus Torvalds 355d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 356d5aaffa9SDirk Brandewie return; 357d5aaffa9SDirk Brandewie 3581c3d85ddSRafael J. 
Wysocki freqs->flags = cpufreq_driver->flags; 3592d06d8c4SDominik Brodowski pr_debug("notification %u of frequency transition to %u kHz\n", 360e4472cb3SDave Jones state, freqs->new); 3611da177e4SLinus Torvalds 3621da177e4SLinus Torvalds switch (state) { 363e4472cb3SDave Jones 3641da177e4SLinus Torvalds case CPUFREQ_PRECHANGE: 365e4472cb3SDave Jones /* detect if the driver reported a value as "old frequency" 366e4472cb3SDave Jones * which is not equal to what the cpufreq core thinks is 367e4472cb3SDave Jones * "old frequency". 3681da177e4SLinus Torvalds */ 3691c3d85ddSRafael J. Wysocki if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 370e4472cb3SDave Jones if ((policy) && (policy->cpu == freqs->cpu) && 371e4472cb3SDave Jones (policy->cur) && (policy->cur != freqs->old)) { 372e837f9b5SJoe Perches pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", 373e4472cb3SDave Jones freqs->old, policy->cur); 374e4472cb3SDave Jones freqs->old = policy->cur; 3751da177e4SLinus Torvalds } 3761da177e4SLinus Torvalds } 377b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 378e4472cb3SDave Jones CPUFREQ_PRECHANGE, freqs); 3791da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 3801da177e4SLinus Torvalds break; 381e4472cb3SDave Jones 3821da177e4SLinus Torvalds case CPUFREQ_POSTCHANGE: 3831da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 384e837f9b5SJoe Perches pr_debug("FREQ: %lu - CPU: %lu\n", 385e837f9b5SJoe Perches (unsigned long)freqs->new, (unsigned long)freqs->cpu); 38625e41933SThomas Renninger trace_cpu_frequency(freqs->new, freqs->cpu); 387b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 388e4472cb3SDave Jones CPUFREQ_POSTCHANGE, freqs); 389e4472cb3SDave Jones if (likely(policy) && likely(policy->cpu == freqs->cpu)) 390e4472cb3SDave Jones policy->cur = freqs->new; 3911da177e4SLinus Torvalds break; 3921da177e4SLinus Torvalds } 3931da177e4SLinus Torvalds } 
394bb176f7dSViresh Kumar 395b43a7ffbSViresh Kumar /** 396b43a7ffbSViresh Kumar * cpufreq_notify_transition - call notifier chain and adjust_jiffies 397b43a7ffbSViresh Kumar * on frequency transition. 398b43a7ffbSViresh Kumar * 399b43a7ffbSViresh Kumar * This function calls the transition notifiers and the "adjust_jiffies" 400b43a7ffbSViresh Kumar * function. It is called twice on all CPU frequency changes that have 401b43a7ffbSViresh Kumar * external effects. 402b43a7ffbSViresh Kumar */ 403236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy, 404b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 405b43a7ffbSViresh Kumar { 406b43a7ffbSViresh Kumar for_each_cpu(freqs->cpu, policy->cpus) 407b43a7ffbSViresh Kumar __cpufreq_notify_transition(policy, freqs, state); 408b43a7ffbSViresh Kumar } 4091da177e4SLinus Torvalds 410f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */ 411236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, 412f7ba3b41SViresh Kumar struct cpufreq_freqs *freqs, int transition_failed) 413f7ba3b41SViresh Kumar { 414f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 415f7ba3b41SViresh Kumar if (!transition_failed) 416f7ba3b41SViresh Kumar return; 417f7ba3b41SViresh Kumar 418f7ba3b41SViresh Kumar swap(freqs->old, freqs->new); 419f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 420f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 421f7ba3b41SViresh Kumar } 422f7ba3b41SViresh Kumar 42312478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, 42412478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs) 42512478cf0SSrivatsa S. Bhat { 426ca654dc3SSrivatsa S. Bhat 427ca654dc3SSrivatsa S. Bhat /* 428ca654dc3SSrivatsa S. 
Bhat * Catch double invocations of _begin() which lead to self-deadlock. 429ca654dc3SSrivatsa S. Bhat * ASYNC_NOTIFICATION drivers are left out because the cpufreq core 430ca654dc3SSrivatsa S. Bhat * doesn't invoke _begin() on their behalf, and hence the chances of 431ca654dc3SSrivatsa S. Bhat * double invocations are very low. Moreover, there are scenarios 432ca654dc3SSrivatsa S. Bhat * where these checks can emit false-positive warnings in these 433ca654dc3SSrivatsa S. Bhat * drivers; so we avoid that by skipping them altogether. 434ca654dc3SSrivatsa S. Bhat */ 435ca654dc3SSrivatsa S. Bhat WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) 436ca654dc3SSrivatsa S. Bhat && current == policy->transition_task); 437ca654dc3SSrivatsa S. Bhat 43812478cf0SSrivatsa S. Bhat wait: 43912478cf0SSrivatsa S. Bhat wait_event(policy->transition_wait, !policy->transition_ongoing); 44012478cf0SSrivatsa S. Bhat 44112478cf0SSrivatsa S. Bhat spin_lock(&policy->transition_lock); 44212478cf0SSrivatsa S. Bhat 44312478cf0SSrivatsa S. Bhat if (unlikely(policy->transition_ongoing)) { 44412478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 44512478cf0SSrivatsa S. Bhat goto wait; 44612478cf0SSrivatsa S. Bhat } 44712478cf0SSrivatsa S. Bhat 44812478cf0SSrivatsa S. Bhat policy->transition_ongoing = true; 449ca654dc3SSrivatsa S. Bhat policy->transition_task = current; 45012478cf0SSrivatsa S. Bhat 45112478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 45212478cf0SSrivatsa S. Bhat 45312478cf0SSrivatsa S. Bhat cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 45412478cf0SSrivatsa S. Bhat } 45512478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); 45612478cf0SSrivatsa S. Bhat 45712478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy, 45812478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs, int transition_failed) 45912478cf0SSrivatsa S. Bhat { 46012478cf0SSrivatsa S. 
Bhat if (unlikely(WARN_ON(!policy->transition_ongoing))) 46112478cf0SSrivatsa S. Bhat return; 46212478cf0SSrivatsa S. Bhat 46312478cf0SSrivatsa S. Bhat cpufreq_notify_post_transition(policy, freqs, transition_failed); 46412478cf0SSrivatsa S. Bhat 46512478cf0SSrivatsa S. Bhat policy->transition_ongoing = false; 466ca654dc3SSrivatsa S. Bhat policy->transition_task = NULL; 46712478cf0SSrivatsa S. Bhat 46812478cf0SSrivatsa S. Bhat wake_up(&policy->transition_wait); 46912478cf0SSrivatsa S. Bhat } 47012478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); 47112478cf0SSrivatsa S. Bhat 4721da177e4SLinus Torvalds 4731da177e4SLinus Torvalds /********************************************************************* 4741da177e4SLinus Torvalds * SYSFS INTERFACE * 4751da177e4SLinus Torvalds *********************************************************************/ 4768a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj, 4776f19efc0SLukasz Majewski struct attribute *attr, char *buf) 4786f19efc0SLukasz Majewski { 4796f19efc0SLukasz Majewski return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); 4806f19efc0SLukasz Majewski } 4816f19efc0SLukasz Majewski 4826f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, 4836f19efc0SLukasz Majewski const char *buf, size_t count) 4846f19efc0SLukasz Majewski { 4856f19efc0SLukasz Majewski int ret, enable; 4866f19efc0SLukasz Majewski 4876f19efc0SLukasz Majewski ret = sscanf(buf, "%d", &enable); 4886f19efc0SLukasz Majewski if (ret != 1 || enable < 0 || enable > 1) 4896f19efc0SLukasz Majewski return -EINVAL; 4906f19efc0SLukasz Majewski 4916f19efc0SLukasz Majewski if (cpufreq_boost_trigger_state(enable)) { 492e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST!\n", 493e837f9b5SJoe Perches __func__, enable ? 
"enable" : "disable"); 4946f19efc0SLukasz Majewski return -EINVAL; 4956f19efc0SLukasz Majewski } 4966f19efc0SLukasz Majewski 497e837f9b5SJoe Perches pr_debug("%s: cpufreq BOOST %s\n", 498e837f9b5SJoe Perches __func__, enable ? "enabled" : "disabled"); 4996f19efc0SLukasz Majewski 5006f19efc0SLukasz Majewski return count; 5016f19efc0SLukasz Majewski } 5026f19efc0SLukasz Majewski define_one_global_rw(boost); 5031da177e4SLinus Torvalds 50442f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor) 5053bcb09a3SJeremy Fitzhardinge { 5063bcb09a3SJeremy Fitzhardinge struct cpufreq_governor *t; 5073bcb09a3SJeremy Fitzhardinge 508f7b27061SViresh Kumar for_each_governor(t) 5097c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 5103bcb09a3SJeremy Fitzhardinge return t; 5113bcb09a3SJeremy Fitzhardinge 5123bcb09a3SJeremy Fitzhardinge return NULL; 5133bcb09a3SJeremy Fitzhardinge } 5143bcb09a3SJeremy Fitzhardinge 5151da177e4SLinus Torvalds /** 5161da177e4SLinus Torvalds * cpufreq_parse_governor - parse a governor string 5171da177e4SLinus Torvalds */ 5181da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, 5191da177e4SLinus Torvalds struct cpufreq_governor **governor) 5201da177e4SLinus Torvalds { 5213bcb09a3SJeremy Fitzhardinge int err = -EINVAL; 5223bcb09a3SJeremy Fitzhardinge 5231c3d85ddSRafael J. Wysocki if (!cpufreq_driver) 5243bcb09a3SJeremy Fitzhardinge goto out; 5253bcb09a3SJeremy Fitzhardinge 5261c3d85ddSRafael J. 
Wysocki if (cpufreq_driver->setpolicy) { 5277c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 5281da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_PERFORMANCE; 5293bcb09a3SJeremy Fitzhardinge err = 0; 5307c4f4539SRasmus Villemoes } else if (!strncasecmp(str_governor, "powersave", 531e08f5f5bSGautham R Shenoy CPUFREQ_NAME_LEN)) { 5321da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_POWERSAVE; 5333bcb09a3SJeremy Fitzhardinge err = 0; 5341da177e4SLinus Torvalds } 5352e1cc3a5SViresh Kumar } else { 5361da177e4SLinus Torvalds struct cpufreq_governor *t; 5373bcb09a3SJeremy Fitzhardinge 5383fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 5393bcb09a3SJeremy Fitzhardinge 54042f91fa1SViresh Kumar t = find_governor(str_governor); 5413bcb09a3SJeremy Fitzhardinge 542ea714970SJeremy Fitzhardinge if (t == NULL) { 543ea714970SJeremy Fitzhardinge int ret; 544ea714970SJeremy Fitzhardinge 545ea714970SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5461a8e1463SKees Cook ret = request_module("cpufreq_%s", str_governor); 547ea714970SJeremy Fitzhardinge mutex_lock(&cpufreq_governor_mutex); 548ea714970SJeremy Fitzhardinge 549ea714970SJeremy Fitzhardinge if (ret == 0) 55042f91fa1SViresh Kumar t = find_governor(str_governor); 551ea714970SJeremy Fitzhardinge } 552ea714970SJeremy Fitzhardinge 5533bcb09a3SJeremy Fitzhardinge if (t != NULL) { 5541da177e4SLinus Torvalds *governor = t; 5553bcb09a3SJeremy Fitzhardinge err = 0; 5561da177e4SLinus Torvalds } 5573bcb09a3SJeremy Fitzhardinge 5583bcb09a3SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5591da177e4SLinus Torvalds } 5601da177e4SLinus Torvalds out: 5613bcb09a3SJeremy Fitzhardinge return err; 5621da177e4SLinus Torvalds } 5631da177e4SLinus Torvalds 5641da177e4SLinus Torvalds /** 565e08f5f5bSGautham R Shenoy * cpufreq_per_cpu_attr_read() / show_##file_name() - 566e08f5f5bSGautham R Shenoy * print out cpufreq information 5671da177e4SLinus Torvalds * 5681da177e4SLinus 
Torvalds * Write out information from cpufreq_driver->policy[cpu]; object must be 5691da177e4SLinus Torvalds * "unsigned int". 5701da177e4SLinus Torvalds */ 5711da177e4SLinus Torvalds 5721da177e4SLinus Torvalds #define show_one(file_name, object) \ 5731da177e4SLinus Torvalds static ssize_t show_##file_name \ 5741da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf) \ 5751da177e4SLinus Torvalds { \ 5761da177e4SLinus Torvalds return sprintf(buf, "%u\n", policy->object); \ 5771da177e4SLinus Torvalds } 5781da177e4SLinus Torvalds 5791da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq); 5801da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq); 581ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 5821da177e4SLinus Torvalds show_one(scaling_min_freq, min); 5831da177e4SLinus Torvalds show_one(scaling_max_freq, max); 584c034b02eSDirk Brandewie 58509347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) 586c034b02eSDirk Brandewie { 587c034b02eSDirk Brandewie ssize_t ret; 588c034b02eSDirk Brandewie 589c034b02eSDirk Brandewie if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 590c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); 591c034b02eSDirk Brandewie else 592c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", policy->cur); 593c034b02eSDirk Brandewie return ret; 594c034b02eSDirk Brandewie } 5951da177e4SLinus Torvalds 596037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 5973a3e9e06SViresh Kumar struct cpufreq_policy *new_policy); 5987970e08bSThomas Renninger 5991da177e4SLinus Torvalds /** 6001da177e4SLinus Torvalds * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 6011da177e4SLinus Torvalds */ 6021da177e4SLinus Torvalds #define store_one(file_name, object) \ 6031da177e4SLinus Torvalds static ssize_t store_##file_name \ 6041da177e4SLinus 
Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count) \ 6051da177e4SLinus Torvalds { \ 606619c144cSVince Hsu int ret, temp; \ 6071da177e4SLinus Torvalds struct cpufreq_policy new_policy; \ 6081da177e4SLinus Torvalds \ 6091da177e4SLinus Torvalds ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 6101da177e4SLinus Torvalds if (ret) \ 6111da177e4SLinus Torvalds return -EINVAL; \ 6121da177e4SLinus Torvalds \ 6131da177e4SLinus Torvalds ret = sscanf(buf, "%u", &new_policy.object); \ 6141da177e4SLinus Torvalds if (ret != 1) \ 6151da177e4SLinus Torvalds return -EINVAL; \ 6161da177e4SLinus Torvalds \ 617619c144cSVince Hsu temp = new_policy.object; \ 618037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); \ 619619c144cSVince Hsu if (!ret) \ 620619c144cSVince Hsu policy->user_policy.object = temp; \ 6211da177e4SLinus Torvalds \ 6221da177e4SLinus Torvalds return ret ? ret : count; \ 6231da177e4SLinus Torvalds } 6241da177e4SLinus Torvalds 6251da177e4SLinus Torvalds store_one(scaling_min_freq, min); 6261da177e4SLinus Torvalds store_one(scaling_max_freq, max); 6271da177e4SLinus Torvalds 6281da177e4SLinus Torvalds /** 6291da177e4SLinus Torvalds * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 6301da177e4SLinus Torvalds */ 631e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 632e08f5f5bSGautham R Shenoy char *buf) 6331da177e4SLinus Torvalds { 634d92d50a4SViresh Kumar unsigned int cur_freq = __cpufreq_get(policy); 6351da177e4SLinus Torvalds if (!cur_freq) 6361da177e4SLinus Torvalds return sprintf(buf, "<unknown>"); 6371da177e4SLinus Torvalds return sprintf(buf, "%u\n", cur_freq); 6381da177e4SLinus Torvalds } 6391da177e4SLinus Torvalds 6401da177e4SLinus Torvalds /** 6411da177e4SLinus Torvalds * show_scaling_governor - show the current policy for the specified CPU 6421da177e4SLinus Torvalds */ 643905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, 
char *buf) 6441da177e4SLinus Torvalds { 6451da177e4SLinus Torvalds if (policy->policy == CPUFREQ_POLICY_POWERSAVE) 6461da177e4SLinus Torvalds return sprintf(buf, "powersave\n"); 6471da177e4SLinus Torvalds else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) 6481da177e4SLinus Torvalds return sprintf(buf, "performance\n"); 6491da177e4SLinus Torvalds else if (policy->governor) 6504b972f0bSviresh kumar return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", 65129464f28SDave Jones policy->governor->name); 6521da177e4SLinus Torvalds return -EINVAL; 6531da177e4SLinus Torvalds } 6541da177e4SLinus Torvalds 6551da177e4SLinus Torvalds /** 6561da177e4SLinus Torvalds * store_scaling_governor - store policy for the specified CPU 6571da177e4SLinus Torvalds */ 6581da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 6591da177e4SLinus Torvalds const char *buf, size_t count) 6601da177e4SLinus Torvalds { 6615136fa56SSrivatsa S. Bhat int ret; 6621da177e4SLinus Torvalds char str_governor[16]; 6631da177e4SLinus Torvalds struct cpufreq_policy new_policy; 6641da177e4SLinus Torvalds 6651da177e4SLinus Torvalds ret = cpufreq_get_policy(&new_policy, policy->cpu); 6661da177e4SLinus Torvalds if (ret) 6671da177e4SLinus Torvalds return ret; 6681da177e4SLinus Torvalds 6691da177e4SLinus Torvalds ret = sscanf(buf, "%15s", str_governor); 6701da177e4SLinus Torvalds if (ret != 1) 6711da177e4SLinus Torvalds return -EINVAL; 6721da177e4SLinus Torvalds 673e08f5f5bSGautham R Shenoy if (cpufreq_parse_governor(str_governor, &new_policy.policy, 674e08f5f5bSGautham R Shenoy &new_policy.governor)) 6751da177e4SLinus Torvalds return -EINVAL; 6761da177e4SLinus Torvalds 677037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 6787970e08bSThomas Renninger 6797970e08bSThomas Renninger policy->user_policy.policy = policy->policy; 6807970e08bSThomas Renninger policy->user_policy.governor = policy->governor; 6817970e08bSThomas Renninger 682e08f5f5bSGautham R Shenoy if 
(ret) 683e08f5f5bSGautham R Shenoy return ret; 684e08f5f5bSGautham R Shenoy else 685e08f5f5bSGautham R Shenoy return count; 6861da177e4SLinus Torvalds } 6871da177e4SLinus Torvalds 6881da177e4SLinus Torvalds /** 6891da177e4SLinus Torvalds * show_scaling_driver - show the cpufreq driver currently loaded 6901da177e4SLinus Torvalds */ 6911da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) 6921da177e4SLinus Torvalds { 6931c3d85ddSRafael J. Wysocki return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); 6941da177e4SLinus Torvalds } 6951da177e4SLinus Torvalds 6961da177e4SLinus Torvalds /** 6971da177e4SLinus Torvalds * show_scaling_available_governors - show the available CPUfreq governors 6981da177e4SLinus Torvalds */ 6991da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, 7001da177e4SLinus Torvalds char *buf) 7011da177e4SLinus Torvalds { 7021da177e4SLinus Torvalds ssize_t i = 0; 7031da177e4SLinus Torvalds struct cpufreq_governor *t; 7041da177e4SLinus Torvalds 7059c0ebcf7SViresh Kumar if (!has_target()) { 7061da177e4SLinus Torvalds i += sprintf(buf, "performance powersave"); 7071da177e4SLinus Torvalds goto out; 7081da177e4SLinus Torvalds } 7091da177e4SLinus Torvalds 710f7b27061SViresh Kumar for_each_governor(t) { 71129464f28SDave Jones if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 71229464f28SDave Jones - (CPUFREQ_NAME_LEN + 2))) 7131da177e4SLinus Torvalds goto out; 7144b972f0bSviresh kumar i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); 7151da177e4SLinus Torvalds } 7161da177e4SLinus Torvalds out: 7171da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7181da177e4SLinus Torvalds return i; 7191da177e4SLinus Torvalds } 720e8628dd0SDarrick J. 
Wong 721f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) 7221da177e4SLinus Torvalds { 7231da177e4SLinus Torvalds ssize_t i = 0; 7241da177e4SLinus Torvalds unsigned int cpu; 7251da177e4SLinus Torvalds 726835481d9SRusty Russell for_each_cpu(cpu, mask) { 7271da177e4SLinus Torvalds if (i) 7281da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 7291da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 7301da177e4SLinus Torvalds if (i >= (PAGE_SIZE - 5)) 7311da177e4SLinus Torvalds break; 7321da177e4SLinus Torvalds } 7331da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7341da177e4SLinus Torvalds return i; 7351da177e4SLinus Torvalds } 736f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus); 7371da177e4SLinus Torvalds 738e8628dd0SDarrick J. Wong /** 739e8628dd0SDarrick J. Wong * show_related_cpus - show the CPUs affected by each transition even if 740e8628dd0SDarrick J. Wong * hw coordination is in use 741e8628dd0SDarrick J. Wong */ 742e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 743e8628dd0SDarrick J. Wong { 744f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->related_cpus, buf); 745e8628dd0SDarrick J. Wong } 746e8628dd0SDarrick J. Wong 747e8628dd0SDarrick J. Wong /** 748e8628dd0SDarrick J. Wong * show_affected_cpus - show the CPUs affected by each transition 749e8628dd0SDarrick J. Wong */ 750e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) 751e8628dd0SDarrick J. Wong { 752f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->cpus, buf); 753e8628dd0SDarrick J. Wong } 754e8628dd0SDarrick J. 
Wong 7559e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, 7569e76988eSVenki Pallipadi const char *buf, size_t count) 7579e76988eSVenki Pallipadi { 7589e76988eSVenki Pallipadi unsigned int freq = 0; 7599e76988eSVenki Pallipadi unsigned int ret; 7609e76988eSVenki Pallipadi 761879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->store_setspeed) 7629e76988eSVenki Pallipadi return -EINVAL; 7639e76988eSVenki Pallipadi 7649e76988eSVenki Pallipadi ret = sscanf(buf, "%u", &freq); 7659e76988eSVenki Pallipadi if (ret != 1) 7669e76988eSVenki Pallipadi return -EINVAL; 7679e76988eSVenki Pallipadi 7689e76988eSVenki Pallipadi policy->governor->store_setspeed(policy, freq); 7699e76988eSVenki Pallipadi 7709e76988eSVenki Pallipadi return count; 7719e76988eSVenki Pallipadi } 7729e76988eSVenki Pallipadi 7739e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) 7749e76988eSVenki Pallipadi { 775879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->show_setspeed) 7769e76988eSVenki Pallipadi return sprintf(buf, "<unsupported>\n"); 7779e76988eSVenki Pallipadi 7789e76988eSVenki Pallipadi return policy->governor->show_setspeed(policy, buf); 7799e76988eSVenki Pallipadi } 7801da177e4SLinus Torvalds 781e2f74f35SThomas Renninger /** 7828bf1ac72Sviresh kumar * show_bios_limit - show the current cpufreq HW/BIOS limitation 783e2f74f35SThomas Renninger */ 784e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) 785e2f74f35SThomas Renninger { 786e2f74f35SThomas Renninger unsigned int limit; 787e2f74f35SThomas Renninger int ret; 7881c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 7891c3d85ddSRafael J. 
Wysocki ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 790e2f74f35SThomas Renninger if (!ret) 791e2f74f35SThomas Renninger return sprintf(buf, "%u\n", limit); 792e2f74f35SThomas Renninger } 793e2f74f35SThomas Renninger return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 794e2f74f35SThomas Renninger } 795e2f74f35SThomas Renninger 7966dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); 7976dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq); 7986dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq); 7996dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency); 8006dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors); 8016dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver); 8026dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq); 8036dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit); 8046dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus); 8056dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus); 8066dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq); 8076dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq); 8086dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor); 8096dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed); 8101da177e4SLinus Torvalds 8111da177e4SLinus Torvalds static struct attribute *default_attrs[] = { 8121da177e4SLinus Torvalds &cpuinfo_min_freq.attr, 8131da177e4SLinus Torvalds &cpuinfo_max_freq.attr, 814ed129784SThomas Renninger &cpuinfo_transition_latency.attr, 8151da177e4SLinus Torvalds &scaling_min_freq.attr, 8161da177e4SLinus Torvalds &scaling_max_freq.attr, 8171da177e4SLinus Torvalds &affected_cpus.attr, 818e8628dd0SDarrick J. 
Wong &related_cpus.attr, 8191da177e4SLinus Torvalds &scaling_governor.attr, 8201da177e4SLinus Torvalds &scaling_driver.attr, 8211da177e4SLinus Torvalds &scaling_available_governors.attr, 8229e76988eSVenki Pallipadi &scaling_setspeed.attr, 8231da177e4SLinus Torvalds NULL 8241da177e4SLinus Torvalds }; 8251da177e4SLinus Torvalds 8261da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) 8271da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr) 8281da177e4SLinus Torvalds 8291da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 8301da177e4SLinus Torvalds { 8311da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8321da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 8331b750e3bSViresh Kumar ssize_t ret; 8346eed9404SViresh Kumar 835ad7722daSviresh kumar down_read(&policy->rwsem); 8365a01f2e8SVenkatesh Pallipadi 837e08f5f5bSGautham R Shenoy if (fattr->show) 838e08f5f5bSGautham R Shenoy ret = fattr->show(policy, buf); 839e08f5f5bSGautham R Shenoy else 840e08f5f5bSGautham R Shenoy ret = -EIO; 841e08f5f5bSGautham R Shenoy 842ad7722daSviresh kumar up_read(&policy->rwsem); 8431b750e3bSViresh Kumar 8441da177e4SLinus Torvalds return ret; 8451da177e4SLinus Torvalds } 8461da177e4SLinus Torvalds 8471da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr, 8481da177e4SLinus Torvalds const char *buf, size_t count) 8491da177e4SLinus Torvalds { 8501da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8511da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 852a07530b4SDave Jones ssize_t ret = -EINVAL; 8536eed9404SViresh Kumar 8544f750c93SSrivatsa S. Bhat get_online_cpus(); 8554f750c93SSrivatsa S. Bhat 8564f750c93SSrivatsa S. Bhat if (!cpu_online(policy->cpu)) 8574f750c93SSrivatsa S. Bhat goto unlock; 8584f750c93SSrivatsa S. 
Bhat 859ad7722daSviresh kumar down_write(&policy->rwsem); 8605a01f2e8SVenkatesh Pallipadi 86111e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 86211e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) { 86311e584cfSViresh Kumar ret = -EBUSY; 86411e584cfSViresh Kumar goto unlock_policy_rwsem; 86511e584cfSViresh Kumar } 86611e584cfSViresh Kumar 867e08f5f5bSGautham R Shenoy if (fattr->store) 868e08f5f5bSGautham R Shenoy ret = fattr->store(policy, buf, count); 869e08f5f5bSGautham R Shenoy else 870e08f5f5bSGautham R Shenoy ret = -EIO; 871e08f5f5bSGautham R Shenoy 87211e584cfSViresh Kumar unlock_policy_rwsem: 873ad7722daSviresh kumar up_write(&policy->rwsem); 8744f750c93SSrivatsa S. Bhat unlock: 8754f750c93SSrivatsa S. Bhat put_online_cpus(); 8764f750c93SSrivatsa S. Bhat 8771da177e4SLinus Torvalds return ret; 8781da177e4SLinus Torvalds } 8791da177e4SLinus Torvalds 8801da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj) 8811da177e4SLinus Torvalds { 8821da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8832d06d8c4SDominik Brodowski pr_debug("last reference is dropped\n"); 8841da177e4SLinus Torvalds complete(&policy->kobj_unregister); 8851da177e4SLinus Torvalds } 8861da177e4SLinus Torvalds 88752cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = { 8881da177e4SLinus Torvalds .show = show, 8891da177e4SLinus Torvalds .store = store, 8901da177e4SLinus Torvalds }; 8911da177e4SLinus Torvalds 8921da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = { 8931da177e4SLinus Torvalds .sysfs_ops = &sysfs_ops, 8941da177e4SLinus Torvalds .default_attrs = default_attrs, 8951da177e4SLinus Torvalds .release = cpufreq_sysfs_release, 8961da177e4SLinus Torvalds }; 8971da177e4SLinus Torvalds 8982361be23SViresh Kumar struct kobject *cpufreq_global_kobject; 8992361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject); 9002361be23SViresh Kumar 9012361be23SViresh Kumar static int 
cpufreq_global_kobject_usage; 9022361be23SViresh Kumar 9032361be23SViresh Kumar int cpufreq_get_global_kobject(void) 9042361be23SViresh Kumar { 9052361be23SViresh Kumar if (!cpufreq_global_kobject_usage++) 9062361be23SViresh Kumar return kobject_add(cpufreq_global_kobject, 9072361be23SViresh Kumar &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); 9082361be23SViresh Kumar 9092361be23SViresh Kumar return 0; 9102361be23SViresh Kumar } 9112361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject); 9122361be23SViresh Kumar 9132361be23SViresh Kumar void cpufreq_put_global_kobject(void) 9142361be23SViresh Kumar { 9152361be23SViresh Kumar if (!--cpufreq_global_kobject_usage) 9162361be23SViresh Kumar kobject_del(cpufreq_global_kobject); 9172361be23SViresh Kumar } 9182361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject); 9192361be23SViresh Kumar 9202361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr) 9212361be23SViresh Kumar { 9222361be23SViresh Kumar int ret = cpufreq_get_global_kobject(); 9232361be23SViresh Kumar 9242361be23SViresh Kumar if (!ret) { 9252361be23SViresh Kumar ret = sysfs_create_file(cpufreq_global_kobject, attr); 9262361be23SViresh Kumar if (ret) 9272361be23SViresh Kumar cpufreq_put_global_kobject(); 9282361be23SViresh Kumar } 9292361be23SViresh Kumar 9302361be23SViresh Kumar return ret; 9312361be23SViresh Kumar } 9322361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file); 9332361be23SViresh Kumar 9342361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr) 9352361be23SViresh Kumar { 9362361be23SViresh Kumar sysfs_remove_file(cpufreq_global_kobject, attr); 9372361be23SViresh Kumar cpufreq_put_global_kobject(); 9382361be23SViresh Kumar } 9392361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file); 9402361be23SViresh Kumar 94187549141SViresh Kumar static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) 94287549141SViresh Kumar { 94387549141SViresh Kumar struct device 
*cpu_dev; 94487549141SViresh Kumar 94587549141SViresh Kumar pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu); 94687549141SViresh Kumar 94787549141SViresh Kumar if (!policy) 94887549141SViresh Kumar return 0; 94987549141SViresh Kumar 95087549141SViresh Kumar cpu_dev = get_cpu_device(cpu); 95187549141SViresh Kumar if (WARN_ON(!cpu_dev)) 95287549141SViresh Kumar return 0; 95387549141SViresh Kumar 95487549141SViresh Kumar return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); 95587549141SViresh Kumar } 95687549141SViresh Kumar 95787549141SViresh Kumar static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) 95887549141SViresh Kumar { 95987549141SViresh Kumar struct device *cpu_dev; 96087549141SViresh Kumar 96187549141SViresh Kumar pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu); 96287549141SViresh Kumar 96387549141SViresh Kumar cpu_dev = get_cpu_device(cpu); 96487549141SViresh Kumar if (WARN_ON(!cpu_dev)) 96587549141SViresh Kumar return; 96687549141SViresh Kumar 96787549141SViresh Kumar sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 96887549141SViresh Kumar } 96987549141SViresh Kumar 97087549141SViresh Kumar /* Add/remove symlinks for all related CPUs */ 971308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) 97219d6f7ecSDave Jones { 97319d6f7ecSDave Jones unsigned int j; 97419d6f7ecSDave Jones int ret = 0; 97519d6f7ecSDave Jones 97687549141SViresh Kumar /* Some related CPUs might not be present (physically hotplugged) */ 97787549141SViresh Kumar for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 9789d16f207SSaravana Kannan if (j == policy->kobj_cpu) 97919d6f7ecSDave Jones continue; 98019d6f7ecSDave Jones 98187549141SViresh Kumar ret = add_cpu_dev_symlink(policy, j); 98271c3461eSRafael J. Wysocki if (ret) 98371c3461eSRafael J. 
Wysocki break; 98419d6f7ecSDave Jones } 98587549141SViresh Kumar 98619d6f7ecSDave Jones return ret; 98719d6f7ecSDave Jones } 98819d6f7ecSDave Jones 98987549141SViresh Kumar static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) 99087549141SViresh Kumar { 99187549141SViresh Kumar unsigned int j; 99287549141SViresh Kumar 99387549141SViresh Kumar /* Some related CPUs might not be present (physically hotplugged) */ 99487549141SViresh Kumar for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 99587549141SViresh Kumar if (j == policy->kobj_cpu) 99687549141SViresh Kumar continue; 99787549141SViresh Kumar 99887549141SViresh Kumar remove_cpu_dev_symlink(policy, j); 99987549141SViresh Kumar } 100087549141SViresh Kumar } 100187549141SViresh Kumar 1002308b60e7SViresh Kumar static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, 10038a25a2fdSKay Sievers struct device *dev) 1004909a694eSDave Jones { 1005909a694eSDave Jones struct freq_attr **drv_attr; 1006909a694eSDave Jones int ret = 0; 1007909a694eSDave Jones 1008909a694eSDave Jones /* set up files for this cpu device */ 10091c3d85ddSRafael J. Wysocki drv_attr = cpufreq_driver->attr; 1010f13f1184SViresh Kumar while (drv_attr && *drv_attr) { 1011909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 1012909a694eSDave Jones if (ret) 10136d4e81edSTomeu Vizoso return ret; 1014909a694eSDave Jones drv_attr++; 1015909a694eSDave Jones } 10161c3d85ddSRafael J. Wysocki if (cpufreq_driver->get) { 1017909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 1018909a694eSDave Jones if (ret) 10196d4e81edSTomeu Vizoso return ret; 1020909a694eSDave Jones } 1021c034b02eSDirk Brandewie 1022909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 1023909a694eSDave Jones if (ret) 10246d4e81edSTomeu Vizoso return ret; 1025c034b02eSDirk Brandewie 10261c3d85ddSRafael J. 
Wysocki if (cpufreq_driver->bios_limit) { 1027e2f74f35SThomas Renninger ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 1028e2f74f35SThomas Renninger if (ret) 10296d4e81edSTomeu Vizoso return ret; 1030e2f74f35SThomas Renninger } 1031909a694eSDave Jones 10326d4e81edSTomeu Vizoso return cpufreq_add_dev_symlink(policy); 1033e18f1682SSrivatsa S. Bhat } 1034e18f1682SSrivatsa S. Bhat 10357f0fa40fSViresh Kumar static int cpufreq_init_policy(struct cpufreq_policy *policy) 1036e18f1682SSrivatsa S. Bhat { 10376e2c89d1Sviresh kumar struct cpufreq_governor *gov = NULL; 1038e18f1682SSrivatsa S. Bhat struct cpufreq_policy new_policy; 1039e18f1682SSrivatsa S. Bhat 1040d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 1041a27a9ab7SJason Baron 10426e2c89d1Sviresh kumar /* Update governor of new_policy to the governor used before hotplug */ 10434573237bSViresh Kumar gov = find_governor(policy->last_governor); 10446e2c89d1Sviresh kumar if (gov) 10456e2c89d1Sviresh kumar pr_debug("Restoring governor %s for cpu %d\n", 10466e2c89d1Sviresh kumar policy->governor->name, policy->cpu); 10476e2c89d1Sviresh kumar else 10486e2c89d1Sviresh kumar gov = CPUFREQ_DEFAULT_GOVERNOR; 10496e2c89d1Sviresh kumar 10506e2c89d1Sviresh kumar new_policy.governor = gov; 10516e2c89d1Sviresh kumar 1052a27a9ab7SJason Baron /* Use the default policy if its valid. 
*/ 1053a27a9ab7SJason Baron if (cpufreq_driver->setpolicy) 10546e2c89d1Sviresh kumar cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 1055ecf7e461SDave Jones 1056ecf7e461SDave Jones /* set default policy */ 10577f0fa40fSViresh Kumar return cpufreq_set_policy(policy, &new_policy); 1058909a694eSDave Jones } 1059909a694eSDave Jones 1060d8d3b471SViresh Kumar static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 106142f921a6SViresh Kumar unsigned int cpu, struct device *dev) 1062fcf80582SViresh Kumar { 10639c0ebcf7SViresh Kumar int ret = 0; 1064fcf80582SViresh Kumar 1065bb29ae15SViresh Kumar /* Has this CPU been taken care of already? */ 1066bb29ae15SViresh Kumar if (cpumask_test_cpu(cpu, policy->cpus)) 1067bb29ae15SViresh Kumar return 0; 1068bb29ae15SViresh Kumar 10699c0ebcf7SViresh Kumar if (has_target()) { 10703de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 10713de9bdebSViresh Kumar if (ret) { 10723de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 10733de9bdebSViresh Kumar return ret; 10743de9bdebSViresh Kumar } 10753de9bdebSViresh Kumar } 1076fcf80582SViresh Kumar 1077ad7722daSviresh kumar down_write(&policy->rwsem); 1078fcf80582SViresh Kumar cpumask_set_cpu(cpu, policy->cpus); 1079ad7722daSviresh kumar up_write(&policy->rwsem); 10802eaa3e2dSViresh Kumar 10819c0ebcf7SViresh Kumar if (has_target()) { 1082e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1083e5c87b76SStratos Karafotis if (!ret) 1084e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1085e5c87b76SStratos Karafotis 1086e5c87b76SStratos Karafotis if (ret) { 10873de9bdebSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 10883de9bdebSViresh Kumar return ret; 10893de9bdebSViresh Kumar } 1090820c6ca2SViresh Kumar } 1091fcf80582SViresh Kumar 109287549141SViresh Kumar return 0; 1093fcf80582SViresh Kumar } 10941da177e4SLinus Torvalds 10958414809cSSrivatsa S. 
/*
 * cpufreq_policy_restore - reuse the inactive policy saved for @cpu.
 *
 * Looks up the per-CPU policy pointer under the driver read lock. If one is
 * found it must be inactive (all its CPUs offline); re-home it on @cpu and
 * clear the governor so cpufreq_init_policy() picks a fresh one. Returns
 * the policy, or NULL if none was saved.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));

		down_write(&policy->rwsem);
		policy->cpu = cpu;
		/* Forget the old governor; init will restore/select one */
		policy->governor = NULL;
		up_write(&policy->rwsem);
	}

	return policy;
}

/*
 * cpufreq_policy_alloc - allocate and minimally initialize a policy.
 *
 * Allocates the policy struct and its cpumasks, registers its kobject under
 * @dev, and initializes the embedded locks, lists and completion. Returns
 * NULL on any allocation/registration failure (everything already acquired
 * is released on the error path).
 */
static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
				   "cpufreq");
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_rcpumask;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = dev->id;

	/* Set this once on allocation */
	policy->kobj_cpu = dev->id;

	return policy;

err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
/*
 * cpufreq_policy_put_kobj - tear down the policy's sysfs presence.
 *
 * Optionally notifies CPUFREQ_REMOVE_POLICY listeners, removes the per-CPU
 * symlinks, drops the kobject reference and then blocks until the release
 * callback (cpufreq_sysfs_release) signals kobj_unregister — i.e. until no
 * sysfs user holds the kobject anymore.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	/* Snapshot kobj/cmp under the rwsem; both outlive the unlock */
	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

/*
 * cpufreq_policy_free - final destruction of a policy.
 *
 * Unlinks the policy from the global list and the per-CPU pointers under
 * the driver write lock, waits for all kobject users via
 * cpufreq_policy_put_kobj(), then frees the cpumasks and the struct itself.
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed.
- Mathieu 121623faf0b7SViresh Kumar */ 121723faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 12181da177e4SLinus Torvalds { 1219fcf80582SViresh Kumar unsigned int j, cpu = dev->id; 122065922465SViresh Kumar int ret = -ENOMEM; 12217f0c020aSViresh Kumar struct cpufreq_policy *policy; 12221da177e4SLinus Torvalds unsigned long flags; 122387549141SViresh Kumar bool recover_policy = !sif; 1224c32b6b8eSAshok Raj 12252d06d8c4SDominik Brodowski pr_debug("adding CPU %u\n", cpu); 12261da177e4SLinus Torvalds 122787549141SViresh Kumar /* 122887549141SViresh Kumar * Only possible if 'cpu' wasn't physically present earlier and we are 122987549141SViresh Kumar * here from subsys_interface add callback. A hotplug notifier will 123087549141SViresh Kumar * follow and we will handle it like logical CPU hotplug then. For now, 123187549141SViresh Kumar * just create the sysfs link. 123287549141SViresh Kumar */ 123387549141SViresh Kumar if (cpu_is_offline(cpu)) 123487549141SViresh Kumar return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); 123587549141SViresh Kumar 1236bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */ 12379104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 12389104bb26SViresh Kumar if (policy && !policy_is_inactive(policy)) { 12399104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 12407f0c020aSViresh Kumar ret = cpufreq_add_policy_cpu(policy, cpu, dev); 12416eed9404SViresh Kumar return ret; 1242fcf80582SViresh Kumar } 12431da177e4SLinus Torvalds 124472368d12SRafael J. Wysocki /* 124572368d12SRafael J. Wysocki * Restore the saved policy when doing light-weight init and fall back 124672368d12SRafael J. Wysocki * to the full init if that fails. 124772368d12SRafael J. Wysocki */ 124896bbbe4aSViresh Kumar policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; 124972368d12SRafael J. 
Wysocki if (!policy) { 125096bbbe4aSViresh Kumar recover_policy = false; 12512fc3384dSViresh Kumar policy = cpufreq_policy_alloc(dev); 1252059019a3SDave Jones if (!policy) 12538101f997SViresh Kumar goto out_release_rwsem; 125472368d12SRafael J. Wysocki } 12550d66b91eSSrivatsa S. Bhat 1256835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 12571da177e4SLinus Torvalds 12581da177e4SLinus Torvalds /* call driver. From then on the cpufreq must be able 12591da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 12601da177e4SLinus Torvalds */ 12611c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy); 12621da177e4SLinus Torvalds if (ret) { 12632d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 12648101f997SViresh Kumar goto out_free_policy; 12651da177e4SLinus Torvalds } 1266643ae6e8SViresh Kumar 12676d4e81edSTomeu Vizoso down_write(&policy->rwsem); 12686d4e81edSTomeu Vizoso 12695a7e56a5SViresh Kumar /* related cpus should atleast have policy->cpus */ 12705a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 12715a7e56a5SViresh Kumar 12725a7e56a5SViresh Kumar /* 12735a7e56a5SViresh Kumar * affected cpus must always be the one, which are online. We aren't 12745a7e56a5SViresh Kumar * managing offline cpus here. 
12755a7e56a5SViresh Kumar */ 12765a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 12775a7e56a5SViresh Kumar 127896bbbe4aSViresh Kumar if (!recover_policy) { 12795a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 12805a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 12816d4e81edSTomeu Vizoso 1282652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1283988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus) 1284652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1285652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1286988bed09SViresh Kumar } 1287652ed95dSViresh Kumar 12882ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1289da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1290da60ce9fSViresh Kumar if (!policy->cur) { 1291da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 12928101f997SViresh Kumar goto out_exit_policy; 1293da60ce9fSViresh Kumar } 1294da60ce9fSViresh Kumar } 1295da60ce9fSViresh Kumar 1296d3916691SViresh Kumar /* 1297d3916691SViresh Kumar * Sometimes boot loaders set CPU frequency to a value outside of 1298d3916691SViresh Kumar * frequency table present with cpufreq core. In such cases CPU might be 1299d3916691SViresh Kumar * unstable if it has to run on that frequency for long duration of time 1300d3916691SViresh Kumar * and so its better to set it to a frequency which is specified in 1301d3916691SViresh Kumar * freq-table. This also makes cpufreq stats inconsistent as 1302d3916691SViresh Kumar * cpufreq-stats would fail to register because current frequency of CPU 1303d3916691SViresh Kumar * isn't found in freq-table. 
1304d3916691SViresh Kumar * 1305d3916691SViresh Kumar * Because we don't want this change to effect boot process badly, we go 1306d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1307d3916691SViresh Kumar * otherwise we will end up setting freq to lowest of the table as 'cur' 1308d3916691SViresh Kumar * is initialized to zero). 1309d3916691SViresh Kumar * 1310d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1" otherwise 1311d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be 1312d3916691SViresh Kumar * equal to target-freq. 1313d3916691SViresh Kumar */ 1314d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1315d3916691SViresh Kumar && has_target()) { 1316d3916691SViresh Kumar /* Are we running at unknown frequency ? */ 1317d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1318d3916691SViresh Kumar if (ret == -EINVAL) { 1319d3916691SViresh Kumar /* Warn user and fix it */ 1320d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", 1321d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1322d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1323d3916691SViresh Kumar CPUFREQ_RELATION_L); 1324d3916691SViresh Kumar 1325d3916691SViresh Kumar /* 1326d3916691SViresh Kumar * Reaching here after boot in a few seconds may not 1327d3916691SViresh Kumar * mean that system will remain stable at "unknown" 1328d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON(). 
1329d3916691SViresh Kumar */ 1330d3916691SViresh Kumar BUG_ON(ret); 1331d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", 1332d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1333d3916691SViresh Kumar } 1334d3916691SViresh Kumar } 1335d3916691SViresh Kumar 1336a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1337a1531acdSThomas Renninger CPUFREQ_START, policy); 1338a1531acdSThomas Renninger 133996bbbe4aSViresh Kumar if (!recover_policy) { 1340308b60e7SViresh Kumar ret = cpufreq_add_dev_interface(policy, dev); 134119d6f7ecSDave Jones if (ret) 13428101f997SViresh Kumar goto out_exit_policy; 1343fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1344fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 1345c88a1f8bSLukasz Majewski 1346c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1347c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1348c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1349988bed09SViresh Kumar } 13508ff69732SDave Jones 13517f0fa40fSViresh Kumar ret = cpufreq_init_policy(policy); 13527f0fa40fSViresh Kumar if (ret) { 13537f0fa40fSViresh Kumar pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", 13547f0fa40fSViresh Kumar __func__, cpu, ret); 13557f0fa40fSViresh Kumar goto out_remove_policy_notify; 13567f0fa40fSViresh Kumar } 1357e18f1682SSrivatsa S. 
Bhat 135896bbbe4aSViresh Kumar if (!recover_policy) { 135908fd8c1cSViresh Kumar policy->user_policy.policy = policy->policy; 136008fd8c1cSViresh Kumar policy->user_policy.governor = policy->governor; 136108fd8c1cSViresh Kumar } 13624e97b631SViresh Kumar up_write(&policy->rwsem); 136308fd8c1cSViresh Kumar 1364038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 13657c45cf31SViresh Kumar 13667c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 13677c45cf31SViresh Kumar if (cpufreq_driver->ready) 13687c45cf31SViresh Kumar cpufreq_driver->ready(policy); 13697c45cf31SViresh Kumar 13702d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 13711da177e4SLinus Torvalds 13721da177e4SLinus Torvalds return 0; 13731da177e4SLinus Torvalds 13747f0fa40fSViresh Kumar out_remove_policy_notify: 13757f0fa40fSViresh Kumar /* cpufreq_policy_free() will notify based on this */ 13767f0fa40fSViresh Kumar recover_policy = true; 13778101f997SViresh Kumar out_exit_policy: 13787106e02bSPrarit Bhargava up_write(&policy->rwsem); 13797106e02bSPrarit Bhargava 1380da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1381da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 13828101f997SViresh Kumar out_free_policy: 13833654c5ccSViresh Kumar cpufreq_policy_free(policy, recover_policy); 13848101f997SViresh Kumar out_release_rwsem: 13851da177e4SLinus Torvalds return ret; 13861da177e4SLinus Torvalds } 13871da177e4SLinus Torvalds 1388cedb70afSSrivatsa S. 
Bhat static int __cpufreq_remove_dev_prepare(struct device *dev, 138996bbbe4aSViresh Kumar struct subsys_interface *sif) 13901da177e4SLinus Torvalds { 13919591becbSViresh Kumar unsigned int cpu = dev->id; 13929591becbSViresh Kumar int ret = 0; 13933a3e9e06SViresh Kumar struct cpufreq_policy *policy; 13941da177e4SLinus Torvalds 1395b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 13961da177e4SLinus Torvalds 1397988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 13983a3e9e06SViresh Kumar if (!policy) { 1399b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 14001da177e4SLinus Torvalds return -EINVAL; 14011da177e4SLinus Torvalds } 14021da177e4SLinus Torvalds 14039c0ebcf7SViresh Kumar if (has_target()) { 14043de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 14053de9bdebSViresh Kumar if (ret) { 14063de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 14073de9bdebSViresh Kumar return ret; 14083de9bdebSViresh Kumar } 1409db5f2995SViresh Kumar } 14101da177e4SLinus Torvalds 14114573237bSViresh Kumar down_write(&policy->rwsem); 14129591becbSViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 14134573237bSViresh Kumar 14149591becbSViresh Kumar if (policy_is_inactive(policy)) { 14159591becbSViresh Kumar if (has_target()) 14164573237bSViresh Kumar strncpy(policy->last_governor, policy->governor->name, 14174573237bSViresh Kumar CPUFREQ_NAME_LEN); 14189591becbSViresh Kumar } else if (cpu == policy->cpu) { 14199591becbSViresh Kumar /* Nominate new CPU */ 14209591becbSViresh Kumar policy->cpu = cpumask_any(policy->cpus); 14219591becbSViresh Kumar } 14224573237bSViresh Kumar up_write(&policy->rwsem); 14231da177e4SLinus Torvalds 14249591becbSViresh Kumar /* Start governor again for active policy */ 14259591becbSViresh Kumar if (!policy_is_inactive(policy)) { 14269591becbSViresh Kumar if (has_target()) { 14279591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 
14289591becbSViresh Kumar if (!ret) 14299591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 143087549141SViresh Kumar 14319591becbSViresh Kumar if (ret) 14329591becbSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 14339591becbSViresh Kumar } 14349591becbSViresh Kumar } else if (cpufreq_driver->stop_cpu) { 1435367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 14369591becbSViresh Kumar } 1437b8eed8afSViresh Kumar 14389591becbSViresh Kumar return ret; 1439cedb70afSSrivatsa S. Bhat } 1440cedb70afSSrivatsa S. Bhat 1441cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev, 144296bbbe4aSViresh Kumar struct subsys_interface *sif) 1443cedb70afSSrivatsa S. Bhat { 1444988bed09SViresh Kumar unsigned int cpu = dev->id; 1445cedb70afSSrivatsa S. Bhat int ret; 14469591becbSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1447cedb70afSSrivatsa S. Bhat 1448cedb70afSSrivatsa S. Bhat if (!policy) { 1449cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 1450cedb70afSSrivatsa S. Bhat return -EINVAL; 1451cedb70afSSrivatsa S. Bhat } 1452cedb70afSSrivatsa S. Bhat 14539591becbSViresh Kumar /* Only proceed for inactive policies */ 14549591becbSViresh Kumar if (!policy_is_inactive(policy)) 145587549141SViresh Kumar return 0; 145687549141SViresh Kumar 145787549141SViresh Kumar /* If cpu is last user of policy, free policy */ 145887549141SViresh Kumar if (has_target()) { 145987549141SViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 146087549141SViresh Kumar if (ret) { 146187549141SViresh Kumar pr_err("%s: Failed to exit governor\n", __func__); 14623de9bdebSViresh Kumar return ret; 14633de9bdebSViresh Kumar } 14643de9bdebSViresh Kumar } 14652a998599SRafael J. Wysocki 14668414809cSSrivatsa S. Bhat /* 14678414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 14688414809cSSrivatsa S. 
Bhat * since this is a core component, and is essential for the 14698414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 14708414809cSSrivatsa S. Bhat */ 14711c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 14723a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 147327ecddc2SJacob Shin 14743654c5ccSViresh Kumar /* Free the policy only if the driver is getting removed. */ 147587549141SViresh Kumar if (sif) 14763654c5ccSViresh Kumar cpufreq_policy_free(policy, true); 14771da177e4SLinus Torvalds 14781da177e4SLinus Torvalds return 0; 14791da177e4SLinus Torvalds } 14801da177e4SLinus Torvalds 1481cedb70afSSrivatsa S. Bhat /** 148227a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1483cedb70afSSrivatsa S. Bhat * 1484cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1485cedb70afSSrivatsa S. Bhat */ 14868a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 14875a01f2e8SVenkatesh Pallipadi { 14888a25a2fdSKay Sievers unsigned int cpu = dev->id; 148927a862e9SViresh Kumar int ret; 1490ec28297aSVenki Pallipadi 149187549141SViresh Kumar /* 149287549141SViresh Kumar * Only possible if 'cpu' is getting physically removed now. A hotplug 149387549141SViresh Kumar * notifier should have already been called and we just need to remove 149487549141SViresh Kumar * link or free policy here. 
149587549141SViresh Kumar */ 149687549141SViresh Kumar if (cpu_is_offline(cpu)) { 149787549141SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 149887549141SViresh Kumar struct cpumask mask; 149987549141SViresh Kumar 150087549141SViresh Kumar if (!policy) 1501ec28297aSVenki Pallipadi return 0; 1502ec28297aSVenki Pallipadi 150387549141SViresh Kumar cpumask_copy(&mask, policy->related_cpus); 150487549141SViresh Kumar cpumask_clear_cpu(cpu, &mask); 150587549141SViresh Kumar 150687549141SViresh Kumar /* 150787549141SViresh Kumar * Free policy only if all policy->related_cpus are removed 150887549141SViresh Kumar * physically. 150987549141SViresh Kumar */ 151087549141SViresh Kumar if (cpumask_intersects(&mask, cpu_present_mask)) { 151187549141SViresh Kumar remove_cpu_dev_symlink(policy, cpu); 151287549141SViresh Kumar return 0; 151387549141SViresh Kumar } 151487549141SViresh Kumar 15153654c5ccSViresh Kumar cpufreq_policy_free(policy, true); 151687549141SViresh Kumar return 0; 151787549141SViresh Kumar } 151887549141SViresh Kumar 151996bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_prepare(dev, sif); 152027a862e9SViresh Kumar 152127a862e9SViresh Kumar if (!ret) 152296bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_finish(dev, sif); 152327a862e9SViresh Kumar 152427a862e9SViresh Kumar return ret; 15255a01f2e8SVenkatesh Pallipadi } 15265a01f2e8SVenkatesh Pallipadi 152765f27f38SDavid Howells static void handle_update(struct work_struct *work) 15281da177e4SLinus Torvalds { 152965f27f38SDavid Howells struct cpufreq_policy *policy = 153065f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 153165f27f38SDavid Howells unsigned int cpu = policy->cpu; 15322d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 15331da177e4SLinus Torvalds cpufreq_update_policy(cpu); 15341da177e4SLinus Torvalds } 15351da177e4SLinus Torvalds 15361da177e4SLinus Torvalds /** 1537bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual 
and saved CPU frequency differs, we're 1538bb176f7dSViresh Kumar * in deep trouble. 1539a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15401da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15411da177e4SLinus Torvalds * 154229464f28SDave Jones * We adjust to current frequency first, and need to clean up later. 154329464f28SDave Jones * So either call to cpufreq_update_policy() or schedule handle_update()). 15441da177e4SLinus Torvalds */ 1545a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1546e08f5f5bSGautham R Shenoy unsigned int new_freq) 15471da177e4SLinus Torvalds { 15481da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1549b43a7ffbSViresh Kumar 1550e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1551a1e1dc41SViresh Kumar policy->cur, new_freq); 15521da177e4SLinus Torvalds 1553a1e1dc41SViresh Kumar freqs.old = policy->cur; 15541da177e4SLinus Torvalds freqs.new = new_freq; 1555b43a7ffbSViresh Kumar 15568fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 15578fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 15581da177e4SLinus Torvalds } 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds /** 15614ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 156295235ca2SVenkatesh Pallipadi * @cpu: CPU number 156395235ca2SVenkatesh Pallipadi * 156495235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 156595235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 
156695235ca2SVenkatesh Pallipadi */ 156795235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 156895235ca2SVenkatesh Pallipadi { 15699e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1570e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 157195235ca2SVenkatesh Pallipadi 15721c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 15731c3d85ddSRafael J. Wysocki return cpufreq_driver->get(cpu); 15749e21ba8bSDirk Brandewie 15759e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 157695235ca2SVenkatesh Pallipadi if (policy) { 1577e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 157895235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 157995235ca2SVenkatesh Pallipadi } 158095235ca2SVenkatesh Pallipadi 15814d34a67dSDave Jones return ret_freq; 158295235ca2SVenkatesh Pallipadi } 158395235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 158495235ca2SVenkatesh Pallipadi 15853d737108SJesse Barnes /** 15863d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 15873d737108SJesse Barnes * @cpu: CPU number 15883d737108SJesse Barnes * 15893d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 
15903d737108SJesse Barnes */ 15913d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 15923d737108SJesse Barnes { 15933d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15943d737108SJesse Barnes unsigned int ret_freq = 0; 15953d737108SJesse Barnes 15963d737108SJesse Barnes if (policy) { 15973d737108SJesse Barnes ret_freq = policy->max; 15983d737108SJesse Barnes cpufreq_cpu_put(policy); 15993d737108SJesse Barnes } 16003d737108SJesse Barnes 16013d737108SJesse Barnes return ret_freq; 16023d737108SJesse Barnes } 16033d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 16043d737108SJesse Barnes 1605d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 16061da177e4SLinus Torvalds { 1607e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 16081da177e4SLinus Torvalds 16091c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 16104d34a67dSDave Jones return ret_freq; 16111da177e4SLinus Torvalds 1612d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 16131da177e4SLinus Torvalds 161411e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 161511e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) 161611e584cfSViresh Kumar return ret_freq; 161711e584cfSViresh Kumar 1618e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 16191c3d85ddSRafael J. 
Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1620e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1621e08f5f5bSGautham R Shenoy saved value exists */ 1622e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1623a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 16241da177e4SLinus Torvalds schedule_work(&policy->update); 16251da177e4SLinus Torvalds } 16261da177e4SLinus Torvalds } 16271da177e4SLinus Torvalds 16284d34a67dSDave Jones return ret_freq; 16295a01f2e8SVenkatesh Pallipadi } 16301da177e4SLinus Torvalds 16315a01f2e8SVenkatesh Pallipadi /** 16325a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 16335a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 16345a01f2e8SVenkatesh Pallipadi * 16355a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16365a01f2e8SVenkatesh Pallipadi */ 16375a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16385a01f2e8SVenkatesh Pallipadi { 1639999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16405a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16415a01f2e8SVenkatesh Pallipadi 1642999976e0SAaron Plattner if (policy) { 1643ad7722daSviresh kumar down_read(&policy->rwsem); 1644d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1645ad7722daSviresh kumar up_read(&policy->rwsem); 1646999976e0SAaron Plattner 1647999976e0SAaron Plattner cpufreq_cpu_put(policy); 1648999976e0SAaron Plattner } 16496eed9404SViresh Kumar 16504d34a67dSDave Jones return ret_freq; 16511da177e4SLinus Torvalds } 16521da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 16531da177e4SLinus Torvalds 16548a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 16558a25a2fdSKay Sievers .name = "cpufreq", 16568a25a2fdSKay Sievers .subsys = &cpu_subsys, 16578a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 16588a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1659e00e56dfSRafael J. 
Wysocki }; 1660e00e56dfSRafael J. Wysocki 1661e28867eaSViresh Kumar /* 1662e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1663e28867eaSViresh Kumar * during suspend.. 166442d4dc3fSBenjamin Herrenschmidt */ 1665e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 166642d4dc3fSBenjamin Herrenschmidt { 1667e28867eaSViresh Kumar int ret; 16684bc5d341SDave Jones 1669e28867eaSViresh Kumar if (!policy->suspend_freq) { 1670e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1671e28867eaSViresh Kumar return -EINVAL; 167242d4dc3fSBenjamin Herrenschmidt } 167342d4dc3fSBenjamin Herrenschmidt 1674e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1675e28867eaSViresh Kumar policy->suspend_freq); 1676e28867eaSViresh Kumar 1677e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1678e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1679e28867eaSViresh Kumar if (ret) 1680e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. err: %d\n", 1681e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1682e28867eaSViresh Kumar 1683c9060494SDave Jones return ret; 168442d4dc3fSBenjamin Herrenschmidt } 1685e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 168642d4dc3fSBenjamin Herrenschmidt 168742d4dc3fSBenjamin Herrenschmidt /** 16882f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 16891da177e4SLinus Torvalds * 16902f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 16912f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 16922f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 16932f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 
16941da177e4SLinus Torvalds */ 16952f0aea93SViresh Kumar void cpufreq_suspend(void) 16961da177e4SLinus Torvalds { 16973a3e9e06SViresh Kumar struct cpufreq_policy *policy; 16981da177e4SLinus Torvalds 16992f0aea93SViresh Kumar if (!cpufreq_driver) 1700e00e56dfSRafael J. Wysocki return; 17011da177e4SLinus Torvalds 17022f0aea93SViresh Kumar if (!has_target()) 1703b1b12babSViresh Kumar goto suspend; 17041da177e4SLinus Torvalds 17052f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 17062f0aea93SViresh Kumar 1707f963735aSViresh Kumar for_each_active_policy(policy) { 17082f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 17092f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 17102f0aea93SViresh Kumar __func__, policy); 17112f0aea93SViresh Kumar else if (cpufreq_driver->suspend 17122f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 17132f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 17142f0aea93SViresh Kumar policy); 17151da177e4SLinus Torvalds } 1716b1b12babSViresh Kumar 1717b1b12babSViresh Kumar suspend: 1718b1b12babSViresh Kumar cpufreq_suspended = true; 17191da177e4SLinus Torvalds } 17201da177e4SLinus Torvalds 17211da177e4SLinus Torvalds /** 17222f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 17231da177e4SLinus Torvalds * 17242f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 17252f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 
17261da177e4SLinus Torvalds */ 17272f0aea93SViresh Kumar void cpufreq_resume(void) 17281da177e4SLinus Torvalds { 17291da177e4SLinus Torvalds struct cpufreq_policy *policy; 17301da177e4SLinus Torvalds 17312f0aea93SViresh Kumar if (!cpufreq_driver) 17321da177e4SLinus Torvalds return; 17331da177e4SLinus Torvalds 17348e30444eSLan Tianyu cpufreq_suspended = false; 17358e30444eSLan Tianyu 17362f0aea93SViresh Kumar if (!has_target()) 17372f0aea93SViresh Kumar return; 17381da177e4SLinus Torvalds 17392f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17402f0aea93SViresh Kumar 1741f963735aSViresh Kumar for_each_active_policy(policy) { 17420c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17430c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 17440c5aa405SViresh Kumar policy); 17450c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 17462f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 17472f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 17482f0aea93SViresh Kumar __func__, policy); 1749c75de0acSViresh Kumar } 17502f0aea93SViresh Kumar 17512f0aea93SViresh Kumar /* 1752c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1753c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1754c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 
17552f0aea93SViresh Kumar */ 1756c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1757c75de0acSViresh Kumar if (WARN_ON(!policy)) 1758c75de0acSViresh Kumar return; 1759c75de0acSViresh Kumar 17603a3e9e06SViresh Kumar schedule_work(&policy->update); 17611da177e4SLinus Torvalds } 17621da177e4SLinus Torvalds 17639d95046eSBorislav Petkov /** 17649d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 17659d95046eSBorislav Petkov * 17669d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 17679d95046eSBorislav Petkov * or NULL, if none. 17689d95046eSBorislav Petkov */ 17699d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 17709d95046eSBorislav Petkov { 17711c3d85ddSRafael J. Wysocki if (cpufreq_driver) 17721c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 17731c3d85ddSRafael J. Wysocki 17741c3d85ddSRafael J. Wysocki return NULL; 17759d95046eSBorislav Petkov } 17769d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 17771da177e4SLinus Torvalds 177851315cdfSThomas Petazzoni /** 177951315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 178051315cdfSThomas Petazzoni * 178151315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 178251315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 
178351315cdfSThomas Petazzoni */ 178451315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 178551315cdfSThomas Petazzoni { 178651315cdfSThomas Petazzoni if (cpufreq_driver) 178751315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 178851315cdfSThomas Petazzoni 178951315cdfSThomas Petazzoni return NULL; 179051315cdfSThomas Petazzoni } 179151315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 179251315cdfSThomas Petazzoni 17931da177e4SLinus Torvalds /********************************************************************* 17941da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 17951da177e4SLinus Torvalds *********************************************************************/ 17961da177e4SLinus Torvalds 17971da177e4SLinus Torvalds /** 17981da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 17991da177e4SLinus Torvalds * @nb: notifier function to register 18001da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18011da177e4SLinus Torvalds * 18021da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 18031da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 18041da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 18051da177e4SLinus Torvalds * changes in cpufreq policy. 18061da177e4SLinus Torvalds * 18071da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1808e041c683SAlan Stern * blocking_notifier_chain_register. 
18091da177e4SLinus Torvalds */ 18101da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 18111da177e4SLinus Torvalds { 18121da177e4SLinus Torvalds int ret; 18131da177e4SLinus Torvalds 1814d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1815d5aaffa9SDirk Brandewie return -EINVAL; 1816d5aaffa9SDirk Brandewie 181774212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 181874212ca4SCesar Eduardo Barros 18191da177e4SLinus Torvalds switch (list) { 18201da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1821b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1822e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18231da177e4SLinus Torvalds break; 18241da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1825e041c683SAlan Stern ret = blocking_notifier_chain_register( 1826e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18271da177e4SLinus Torvalds break; 18281da177e4SLinus Torvalds default: 18291da177e4SLinus Torvalds ret = -EINVAL; 18301da177e4SLinus Torvalds } 18311da177e4SLinus Torvalds 18321da177e4SLinus Torvalds return ret; 18331da177e4SLinus Torvalds } 18341da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18351da177e4SLinus Torvalds 18361da177e4SLinus Torvalds /** 18371da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18381da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18391da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18401da177e4SLinus Torvalds * 18411da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18421da177e4SLinus Torvalds * 18431da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1844e041c683SAlan Stern * blocking_notifier_chain_unregister. 
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/*
 * Ask the driver for the intermediate frequency for @index and switch to
 * it, wrapped in a transition notification.  Must set freqs->new to the
 * intermediate frequency; returns 0 without switching when the driver
 * reports no intermediate step is needed (freqs->new == 0).
 */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

/*
 * Switch to freq_table[index], optionally via the driver's intermediate
 * frequency, emitting transition notifications unless the driver handles
 * them itself (CPUFREQ_ASYNC_NOTIFICATION).  On failure after the
 * intermediate switch, notifies a transition back to policy->restore_freq.
 */
static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

/*
 * Unlocked core of the frequency-change request: clamps @target_freq to
 * the policy limits and dispatches to the driver's ->target() or
 * ->target_index() callback.  Callers are expected to hold policy->rwsem
 * (cpufreq_driver_target() below is the locked wrapper).
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

/* Locked wrapper: takes policy->rwsem around __cpufreq_driver_target(). */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

/*
 * Deliver governor @event (INIT/START/STOP/EXIT/LIMITS) to the policy's
 * governor, tracking governor_enabled under cpufreq_governor_lock to
 * reject out-of-order events (-EBUSY), and rolling the flag back if the
 * governor callback fails.  Module refcounting: a reference is taken on
 * POLICY_INIT and dropped on failed INIT or successful POLICY_EXIT.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	/* Refuse redundant START, or LIMITS/STOP while not started */
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
	    ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}

/**
 * cpufreq_register_governor - register a new cpufreq governor
 * @governor: governor to be registered
 *
 * Adds @governor to the global governor list under cpufreq_governor_mutex.
 * Returns 0 on success, -EBUSY if a governor of the same name is already
 * registered, -ENODEV if cpufreq is disabled, -EINVAL on a NULL argument.
 */
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

/**
 * cpufreq_unregister_governor - remove a previously registered governor
 * @governor: governor to be unregistered
 *
 * Clears any stale references to @governor (policy->governor and the
 * saved last_governor name) from inactive policies under the driver
 * read-lock, then unlinks it from the governor list.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

/*
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Validates @new_policy (driver ->verify plus policy notifier chain,
 * twice), applies the new min/max limits and then either forwards the
 * whole policy to a ->setpolicy driver or performs a governor switch:
 * STOP+EXIT the old governor, INIT+START the new one, restoring the old
 * governor if the new one fails to start.  policy->rwsem is dropped and
 * re-taken around the POLICY_EXIT calls, so the caller must hold it.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* New limits must overlap the currently applied ones */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			/* This can happen due to race with other operations */
			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
				 __func__, old_gov->name, ret);
			return ret;
		}

		up_write(&policy->rwsem);
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);

		if (ret) {
			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
			       __func__, old_gov->name, ret);
			return ret;
		}
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
	if (!ret) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
			policy->governor = NULL;
		else
			__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return ret;

out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Rebuild the requested policy from the user-set limits/governor */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

/*
 * CPU hotplug callback: (re-)adds the cpufreq device when a CPU comes
 * online (or a down attempt fails), and tears it down in two phases on
 * removal (prepare on CPU_DOWN_PREPARE, finish on CPU_POST_DEAD).
 */
static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               BOOST						     *
 *********************************************************************/
/*
 * Software boost fallback: refresh each active policy's limits from its
 * frequency table and push them to the governor.  @state itself is not
 * read here - cpufreq_boost_trigger_state() has already stored it in
 * cpufreq_driver->boost_enabled, which the table code consults.
 */
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

/*
 * Enable/disable frequency boost.  Flips boost_enabled under the driver
 * write-lock before calling the driver's ->set_boost(), and reverts the
 * flag if the driver call fails.
 */
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

/* Returns non-zero when the registered driver supports frequency boost. */
int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

/* Returns the current boost state; assumes a driver is registered. */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * Sanity-check the callback set: verify+init are mandatory; exactly
	 * one of setpolicy or target/target_index must be provided; and
	 * get_intermediate/target_intermediate must come as a pair.
	 */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Only one driver may be registered at a time */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
259390de2a4aSDoug Anderson */ 259490de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 259590de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 259690de2a4aSDoug Anderson }; 259790de2a4aSDoug Anderson 25985a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 25995a01f2e8SVenkatesh Pallipadi { 2600a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2601a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2602a7b422cdSKonrad Rzeszutek Wilk 26032361be23SViresh Kumar cpufreq_global_kobject = kobject_create(); 26048aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 26058aa84ad8SThomas Renninger 260690de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 260790de2a4aSDoug Anderson 26085a01f2e8SVenkatesh Pallipadi return 0; 26095a01f2e8SVenkatesh Pallipadi } 26105a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2611