/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *           (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *            Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *            Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}

/* Finds next Active/Inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)		\
	for (__policy = first_policy(__active);			\
	     __policy;						\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)			\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)			\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && !policy_is_inactive(policy) ?
		policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding cpufreq_cpu_put() call isn't made, the policy won't be
 * freed, as that depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
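
/*
 * Illustrative sketch (not taken from this file): a driver that issues its
 * own transition notifications would typically bracket the actual hardware
 * change with the two helpers above, e.g.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target_freq };
 *	int err;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	err = hw_set_frequency(target_freq);
 *	cpufreq_freq_transition_end(policy, &freqs, err != 0);
 *
 * hw_set_frequency() and target_freq are placeholders, not real symbols.
 * This keeps PRECHANGE/POSTCHANGE notifiers and loops_per_jiffy consistent
 * even when the hardware change fails.
 */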

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
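
/*
 * Illustrative sketch (sysfs paths assumed, not defined in this file): the
 * attributes above are exposed per policy under sysfs, so from user space the
 * governor is typically inspected and changed with something like
 *
 *	cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors
 *	echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * The write lands in store_scaling_governor(), which parses the string with
 * cpufreq_parse_governor() and applies the result via cpufreq_set_policy().
 */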

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		if (j == policy->kobj_cpu)
			continue;

		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		if (j == policy->kobj_cpu)
			continue;

		remove_cpu_dev_symlink(policy, j);
	}
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));

		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	}

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;
Wysocki 11352fc3384dSViresh Kumar ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 11362fc3384dSViresh Kumar "cpufreq"); 11372fc3384dSViresh Kumar if (ret) { 11382fc3384dSViresh Kumar pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1139559ed407SRafael J. Wysocki goto err_free_real_cpus; 11402fc3384dSViresh Kumar } 11412fc3384dSViresh Kumar 1142c88a1f8bSLukasz Majewski INIT_LIST_HEAD(&policy->policy_list); 1143ad7722daSviresh kumar init_rwsem(&policy->rwsem); 114412478cf0SSrivatsa S. Bhat spin_lock_init(&policy->transition_lock); 114512478cf0SSrivatsa S. Bhat init_waitqueue_head(&policy->transition_wait); 1146818c5712SViresh Kumar init_completion(&policy->kobj_unregister); 1147818c5712SViresh Kumar INIT_WORK(&policy->update, handle_update); 1148ad7722daSviresh kumar 11492fc3384dSViresh Kumar policy->cpu = dev->id; 115087549141SViresh Kumar 115187549141SViresh Kumar /* Set this once on allocation */ 11522fc3384dSViresh Kumar policy->kobj_cpu = dev->id; 115387549141SViresh Kumar 1154e9698cc5SSrivatsa S. Bhat return policy; 1155e9698cc5SSrivatsa S. Bhat 1156559ed407SRafael J. Wysocki err_free_real_cpus: 1157559ed407SRafael J. Wysocki free_cpumask_var(policy->real_cpus); 11582fc3384dSViresh Kumar err_free_rcpumask: 11592fc3384dSViresh Kumar free_cpumask_var(policy->related_cpus); 1160e9698cc5SSrivatsa S. Bhat err_free_cpumask: 1161e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1162e9698cc5SSrivatsa S. Bhat err_free_policy: 1163e9698cc5SSrivatsa S. Bhat kfree(policy); 1164e9698cc5SSrivatsa S. Bhat 1165e9698cc5SSrivatsa S. Bhat return NULL; 1166e9698cc5SSrivatsa S. Bhat } 1167e9698cc5SSrivatsa S. Bhat 11682fc3384dSViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) 116942f921a6SViresh Kumar { 117042f921a6SViresh Kumar struct kobject *kobj; 117142f921a6SViresh Kumar struct completion *cmp; 117242f921a6SViresh Kumar 11732fc3384dSViresh Kumar if (notify) 1174fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1175fcd7af91SViresh Kumar CPUFREQ_REMOVE_POLICY, policy); 1176fcd7af91SViresh Kumar 117787549141SViresh Kumar down_write(&policy->rwsem); 117887549141SViresh Kumar cpufreq_remove_dev_symlink(policy); 117942f921a6SViresh Kumar kobj = &policy->kobj; 118042f921a6SViresh Kumar cmp = &policy->kobj_unregister; 118187549141SViresh Kumar up_write(&policy->rwsem); 118242f921a6SViresh Kumar kobject_put(kobj); 118342f921a6SViresh Kumar 118442f921a6SViresh Kumar /* 118542f921a6SViresh Kumar * We need to make sure that the underlying kobj is 118642f921a6SViresh Kumar * actually not referenced anymore by anybody before we 118742f921a6SViresh Kumar * proceed with unloading. 118842f921a6SViresh Kumar */ 118942f921a6SViresh Kumar pr_debug("waiting for dropping of refcount\n"); 119042f921a6SViresh Kumar wait_for_completion(cmp); 119142f921a6SViresh Kumar pr_debug("wait complete\n"); 119242f921a6SViresh Kumar } 119342f921a6SViresh Kumar 11943654c5ccSViresh Kumar static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) 1195e9698cc5SSrivatsa S. 
Bhat { 1196988bed09SViresh Kumar unsigned long flags; 1197988bed09SViresh Kumar int cpu; 1198988bed09SViresh Kumar 1199988bed09SViresh Kumar /* Remove policy from list */ 1200988bed09SViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1201988bed09SViresh Kumar list_del(&policy->policy_list); 1202988bed09SViresh Kumar 1203988bed09SViresh Kumar for_each_cpu(cpu, policy->related_cpus) 1204988bed09SViresh Kumar per_cpu(cpufreq_cpu_data, cpu) = NULL; 1205988bed09SViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1206988bed09SViresh Kumar 12073654c5ccSViresh Kumar cpufreq_policy_put_kobj(policy, notify); 1208559ed407SRafael J. Wysocki free_cpumask_var(policy->real_cpus); 1209e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->related_cpus); 1210e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1211e9698cc5SSrivatsa S. Bhat kfree(policy); 1212e9698cc5SSrivatsa S. Bhat } 1213e9698cc5SSrivatsa S. Bhat 121423faf0b7SViresh Kumar /** 121523faf0b7SViresh Kumar * cpufreq_add_dev - add a CPU device 121623faf0b7SViresh Kumar * 121723faf0b7SViresh Kumar * Adds the cpufreq interface for a CPU device. 121823faf0b7SViresh Kumar * 121923faf0b7SViresh Kumar * The Oracle says: try running cpufreq registration/unregistration concurrently 122023faf0b7SViresh Kumar * with with cpu hotplugging and all hell will break loose. Tried to clean this 122123faf0b7SViresh Kumar * mess up, but more thorough testing is needed. - Mathieu 122223faf0b7SViresh Kumar */ 122323faf0b7SViresh Kumar static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 12241da177e4SLinus Torvalds { 1225fcf80582SViresh Kumar unsigned int j, cpu = dev->id; 122665922465SViresh Kumar int ret = -ENOMEM; 12277f0c020aSViresh Kumar struct cpufreq_policy *policy; 12281da177e4SLinus Torvalds unsigned long flags; 122987549141SViresh Kumar bool recover_policy = !sif; 1230c32b6b8eSAshok Raj 12312d06d8c4SDominik Brodowski pr_debug("adding CPU %u\n", cpu); 12321da177e4SLinus Torvalds 1233559ed407SRafael J. Wysocki if (cpu_is_offline(cpu)) { 123487549141SViresh Kumar /* 1235559ed407SRafael J. Wysocki * Only possible if we are here from the subsys_interface add 1236559ed407SRafael J. Wysocki * callback. A hotplug notifier will follow and we will handle 1237559ed407SRafael J. Wysocki * it as CPU online then. For now, just create the sysfs link, 1238559ed407SRafael J. Wysocki * unless there is no policy or the link is already present. 123987549141SViresh Kumar */ 1240559ed407SRafael J. Wysocki policy = per_cpu(cpufreq_cpu_data, cpu); 1241559ed407SRafael J. Wysocki return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) 1242559ed407SRafael J. Wysocki ? add_cpu_dev_symlink(policy, cpu) : 0; 1243559ed407SRafael J. Wysocki } 124487549141SViresh Kumar 1245bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */ 12469104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 12479104bb26SViresh Kumar if (policy && !policy_is_inactive(policy)) { 12489104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 12497f0c020aSViresh Kumar ret = cpufreq_add_policy_cpu(policy, cpu, dev); 12506eed9404SViresh Kumar return ret; 1251fcf80582SViresh Kumar } 12521da177e4SLinus Torvalds 125372368d12SRafael J. Wysocki /* 125472368d12SRafael J. Wysocki * Restore the saved policy when doing light-weight init and fall back 125572368d12SRafael J. Wysocki * to the full init if that fails. 125672368d12SRafael J. Wysocki */ 125796bbbe4aSViresh Kumar policy = recover_policy ? 
cpufreq_policy_restore(cpu) : NULL; 125872368d12SRafael J. Wysocki if (!policy) { 125996bbbe4aSViresh Kumar recover_policy = false; 12602fc3384dSViresh Kumar policy = cpufreq_policy_alloc(dev); 1261059019a3SDave Jones if (!policy) 12628101f997SViresh Kumar goto out_release_rwsem; 126372368d12SRafael J. Wysocki } 12640d66b91eSSrivatsa S. Bhat 1265835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 12661da177e4SLinus Torvalds 12671da177e4SLinus Torvalds /* call driver. From then on the cpufreq must be able 12681da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 12691da177e4SLinus Torvalds */ 12701c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy); 12711da177e4SLinus Torvalds if (ret) { 12722d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 12738101f997SViresh Kumar goto out_free_policy; 12741da177e4SLinus Torvalds } 1275643ae6e8SViresh Kumar 12766d4e81edSTomeu Vizoso down_write(&policy->rwsem); 12776d4e81edSTomeu Vizoso 12785a7e56a5SViresh Kumar /* related cpus should atleast have policy->cpus */ 12795a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 12805a7e56a5SViresh Kumar 1281559ed407SRafael J. Wysocki /* Remember which CPUs have been present at the policy creation time. */ 1282559ed407SRafael J. Wysocki if (!recover_policy) 1283559ed407SRafael J. Wysocki cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); 1284559ed407SRafael J. Wysocki 12855a7e56a5SViresh Kumar /* 12865a7e56a5SViresh Kumar * affected cpus must always be the one, which are online. We aren't 12875a7e56a5SViresh Kumar * managing offline cpus here. 12885a7e56a5SViresh Kumar */ 12895a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 12905a7e56a5SViresh Kumar 129196bbbe4aSViresh Kumar if (!recover_policy) { 12925a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 12935a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 12946d4e81edSTomeu Vizoso 1295652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1296988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus) 1297652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1298652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1299988bed09SViresh Kumar } 1300652ed95dSViresh Kumar 13012ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1302da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1303da60ce9fSViresh Kumar if (!policy->cur) { 1304da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 13058101f997SViresh Kumar goto out_exit_policy; 1306da60ce9fSViresh Kumar } 1307da60ce9fSViresh Kumar } 1308da60ce9fSViresh Kumar 1309d3916691SViresh Kumar /* 1310d3916691SViresh Kumar * Sometimes boot loaders set CPU frequency to a value outside of 1311d3916691SViresh Kumar * frequency table present with cpufreq core. In such cases CPU might be 1312d3916691SViresh Kumar * unstable if it has to run on that frequency for long duration of time 1313d3916691SViresh Kumar * and so its better to set it to a frequency which is specified in 1314d3916691SViresh Kumar * freq-table. This also makes cpufreq stats inconsistent as 1315d3916691SViresh Kumar * cpufreq-stats would fail to register because current frequency of CPU 1316d3916691SViresh Kumar * isn't found in freq-table. 
1317d3916691SViresh Kumar * 1318d3916691SViresh Kumar * Because we don't want this change to effect boot process badly, we go 1319d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1320d3916691SViresh Kumar * otherwise we will end up setting freq to lowest of the table as 'cur' 1321d3916691SViresh Kumar * is initialized to zero). 1322d3916691SViresh Kumar * 1323d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1" otherwise 1324d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be 1325d3916691SViresh Kumar * equal to target-freq. 1326d3916691SViresh Kumar */ 1327d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1328d3916691SViresh Kumar && has_target()) { 1329d3916691SViresh Kumar /* Are we running at unknown frequency ? */ 1330d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1331d3916691SViresh Kumar if (ret == -EINVAL) { 1332d3916691SViresh Kumar /* Warn user and fix it */ 1333d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", 1334d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1335d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1336d3916691SViresh Kumar CPUFREQ_RELATION_L); 1337d3916691SViresh Kumar 1338d3916691SViresh Kumar /* 1339d3916691SViresh Kumar * Reaching here after boot in a few seconds may not 1340d3916691SViresh Kumar * mean that system will remain stable at "unknown" 1341d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON(). 1342d3916691SViresh Kumar */ 1343d3916691SViresh Kumar BUG_ON(ret); 1344d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", 1345d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1346d3916691SViresh Kumar } 1347d3916691SViresh Kumar } 1348d3916691SViresh Kumar 1349a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1350a1531acdSThomas Renninger CPUFREQ_START, policy); 1351a1531acdSThomas Renninger 135296bbbe4aSViresh Kumar if (!recover_policy) { 1353308b60e7SViresh Kumar ret = cpufreq_add_dev_interface(policy, dev); 135419d6f7ecSDave Jones if (ret) 13558101f997SViresh Kumar goto out_exit_policy; 1356fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1357fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 1358c88a1f8bSLukasz Majewski 1359c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1360c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1361c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1362988bed09SViresh Kumar } 13638ff69732SDave Jones 13647f0fa40fSViresh Kumar ret = cpufreq_init_policy(policy); 13657f0fa40fSViresh Kumar if (ret) { 13667f0fa40fSViresh Kumar pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", 13677f0fa40fSViresh Kumar __func__, cpu, ret); 13687f0fa40fSViresh Kumar goto out_remove_policy_notify; 13697f0fa40fSViresh Kumar } 1370e18f1682SSrivatsa S. 
Bhat 137196bbbe4aSViresh Kumar if (!recover_policy) { 137208fd8c1cSViresh Kumar policy->user_policy.policy = policy->policy; 137308fd8c1cSViresh Kumar policy->user_policy.governor = policy->governor; 137408fd8c1cSViresh Kumar } 13754e97b631SViresh Kumar up_write(&policy->rwsem); 137608fd8c1cSViresh Kumar 1377038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 13787c45cf31SViresh Kumar 13797c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 13807c45cf31SViresh Kumar if (cpufreq_driver->ready) 13817c45cf31SViresh Kumar cpufreq_driver->ready(policy); 13827c45cf31SViresh Kumar 13832d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 13841da177e4SLinus Torvalds 13851da177e4SLinus Torvalds return 0; 13861da177e4SLinus Torvalds 13877f0fa40fSViresh Kumar out_remove_policy_notify: 13887f0fa40fSViresh Kumar /* cpufreq_policy_free() will notify based on this */ 13897f0fa40fSViresh Kumar recover_policy = true; 13908101f997SViresh Kumar out_exit_policy: 13917106e02bSPrarit Bhargava up_write(&policy->rwsem); 13927106e02bSPrarit Bhargava 1393da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1394da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 13958101f997SViresh Kumar out_free_policy: 13963654c5ccSViresh Kumar cpufreq_policy_free(policy, recover_policy); 13978101f997SViresh Kumar out_release_rwsem: 13981da177e4SLinus Torvalds return ret; 13991da177e4SLinus Torvalds } 14001da177e4SLinus Torvalds 1401*15c0b4d2SRafael J. Wysocki static void cpufreq_offline_prepare(unsigned int cpu) 14021da177e4SLinus Torvalds { 14033a3e9e06SViresh Kumar struct cpufreq_policy *policy; 14041da177e4SLinus Torvalds 1405b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 14061da177e4SLinus Torvalds 1407988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 14083a3e9e06SViresh Kumar if (!policy) { 1409b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 1410*15c0b4d2SRafael J. Wysocki return; 14111da177e4SLinus Torvalds } 14121da177e4SLinus Torvalds 14139c0ebcf7SViresh Kumar if (has_target()) { 1414*15c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1415559ed407SRafael J. Wysocki if (ret) 14163de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 1417db5f2995SViresh Kumar } 14181da177e4SLinus Torvalds 14194573237bSViresh Kumar down_write(&policy->rwsem); 14209591becbSViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 14214573237bSViresh Kumar 14229591becbSViresh Kumar if (policy_is_inactive(policy)) { 14239591becbSViresh Kumar if (has_target()) 14244573237bSViresh Kumar strncpy(policy->last_governor, policy->governor->name, 14254573237bSViresh Kumar CPUFREQ_NAME_LEN); 14269591becbSViresh Kumar } else if (cpu == policy->cpu) { 14279591becbSViresh Kumar /* Nominate new CPU */ 14289591becbSViresh Kumar policy->cpu = cpumask_any(policy->cpus); 14299591becbSViresh Kumar } 14304573237bSViresh Kumar up_write(&policy->rwsem); 14311da177e4SLinus Torvalds 14329591becbSViresh Kumar /* Start governor again for active policy */ 14339591becbSViresh Kumar if (!policy_is_inactive(policy)) { 14349591becbSViresh Kumar if (has_target()) { 1435*15c0b4d2SRafael J. 
Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 14369591becbSViresh Kumar if (!ret) 14379591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 143887549141SViresh Kumar 14399591becbSViresh Kumar if (ret) 14409591becbSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 14419591becbSViresh Kumar } 14429591becbSViresh Kumar } else if (cpufreq_driver->stop_cpu) { 1443367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 14449591becbSViresh Kumar } 1445cedb70afSSrivatsa S. Bhat } 1446cedb70afSSrivatsa S. Bhat 1447*15c0b4d2SRafael J. Wysocki static void cpufreq_offline_finish(unsigned int cpu) 1448cedb70afSSrivatsa S. Bhat { 14499591becbSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1450cedb70afSSrivatsa S. Bhat 1451cedb70afSSrivatsa S. Bhat if (!policy) { 1452cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 1453*15c0b4d2SRafael J. Wysocki return; 1454cedb70afSSrivatsa S. Bhat } 1455cedb70afSSrivatsa S. Bhat 14569591becbSViresh Kumar /* Only proceed for inactive policies */ 14579591becbSViresh Kumar if (!policy_is_inactive(policy)) 1458*15c0b4d2SRafael J. Wysocki return; 145987549141SViresh Kumar 146087549141SViresh Kumar /* If cpu is last user of policy, free policy */ 146187549141SViresh Kumar if (has_target()) { 1462*15c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1463559ed407SRafael J. Wysocki if (ret) 146487549141SViresh Kumar pr_err("%s: Failed to exit governor\n", __func__); 14653de9bdebSViresh Kumar } 14662a998599SRafael J. Wysocki 14678414809cSSrivatsa S. Bhat /* 14688414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 14698414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 14708414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 14718414809cSSrivatsa S. Bhat */ 14721c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 14733a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 14741da177e4SLinus Torvalds } 14751da177e4SLinus Torvalds 1476cedb70afSSrivatsa S. Bhat /** 147727a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1478cedb70afSSrivatsa S. Bhat * 1479cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1480cedb70afSSrivatsa S. Bhat */ 14818a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 14825a01f2e8SVenkatesh Pallipadi { 14838a25a2fdSKay Sievers unsigned int cpu = dev->id; 148487549141SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 148587549141SViresh Kumar 148687549141SViresh Kumar if (!policy) 1487ec28297aSVenki Pallipadi return 0; 1488ec28297aSVenki Pallipadi 1489559ed407SRafael J. Wysocki if (cpu_online(cpu)) { 1490*15c0b4d2SRafael J. Wysocki cpufreq_offline_prepare(cpu); 1491*15c0b4d2SRafael J. Wysocki cpufreq_offline_finish(cpu); 149287549141SViresh Kumar } 149387549141SViresh Kumar 1494559ed407SRafael J. Wysocki cpumask_clear_cpu(cpu, policy->real_cpus); 1495559ed407SRafael J. Wysocki 1496559ed407SRafael J. Wysocki if (cpumask_empty(policy->real_cpus)) { 14973654c5ccSViresh Kumar cpufreq_policy_free(policy, true); 149887549141SViresh Kumar return 0; 149987549141SViresh Kumar } 150087549141SViresh Kumar 1501559ed407SRafael J. Wysocki if (cpu != policy->kobj_cpu) { 1502559ed407SRafael J. Wysocki remove_cpu_dev_symlink(policy, cpu); 1503559ed407SRafael J. Wysocki } else { 1504559ed407SRafael J. Wysocki /* 1505559ed407SRafael J. 
Wysocki * The CPU owning the policy object is going away. Move it to 1506559ed407SRafael J. Wysocki * another suitable CPU. 1507559ed407SRafael J. Wysocki */ 1508559ed407SRafael J. Wysocki unsigned int new_cpu = cpumask_first(policy->real_cpus); 1509559ed407SRafael J. Wysocki struct device *new_dev = get_cpu_device(new_cpu); 151027a862e9SViresh Kumar 1511559ed407SRafael J. Wysocki dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu); 151227a862e9SViresh Kumar 1513559ed407SRafael J. Wysocki sysfs_remove_link(&new_dev->kobj, "cpufreq"); 1514559ed407SRafael J. Wysocki policy->kobj_cpu = new_cpu; 1515559ed407SRafael J. Wysocki WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); 1516559ed407SRafael J. Wysocki } 1517559ed407SRafael J. Wysocki 1518559ed407SRafael J. Wysocki return 0; 15195a01f2e8SVenkatesh Pallipadi } 15205a01f2e8SVenkatesh Pallipadi 152165f27f38SDavid Howells static void handle_update(struct work_struct *work) 15221da177e4SLinus Torvalds { 152365f27f38SDavid Howells struct cpufreq_policy *policy = 152465f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 152565f27f38SDavid Howells unsigned int cpu = policy->cpu; 15262d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 15271da177e4SLinus Torvalds cpufreq_update_policy(cpu); 15281da177e4SLinus Torvalds } 15291da177e4SLinus Torvalds 15301da177e4SLinus Torvalds /** 1531bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1532bb176f7dSViresh Kumar * in deep trouble. 1533a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15341da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15351da177e4SLinus Torvalds * 153629464f28SDave Jones * We adjust to current frequency first, and need to clean up later. 153729464f28SDave Jones * So either call to cpufreq_update_policy() or schedule handle_update()). 15381da177e4SLinus Torvalds */ 1539a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1540e08f5f5bSGautham R Shenoy unsigned int new_freq) 15411da177e4SLinus Torvalds { 15421da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1543b43a7ffbSViresh Kumar 1544e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1545a1e1dc41SViresh Kumar policy->cur, new_freq); 15461da177e4SLinus Torvalds 1547a1e1dc41SViresh Kumar freqs.old = policy->cur; 15481da177e4SLinus Torvalds freqs.new = new_freq; 1549b43a7ffbSViresh Kumar 15508fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 15518fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 15521da177e4SLinus Torvalds } 15531da177e4SLinus Torvalds 15541da177e4SLinus Torvalds /** 15554ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 155695235ca2SVenkatesh Pallipadi * @cpu: CPU number 155795235ca2SVenkatesh Pallipadi * 155895235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 155995235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 156095235ca2SVenkatesh Pallipadi */ 156195235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 156295235ca2SVenkatesh Pallipadi { 15639e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1564e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 156595235ca2SVenkatesh Pallipadi 15661c3d85ddSRafael J. 
Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 15671c3d85ddSRafael J. Wysocki return cpufreq_driver->get(cpu); 15689e21ba8bSDirk Brandewie 15699e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 157095235ca2SVenkatesh Pallipadi if (policy) { 1571e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 157295235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 157395235ca2SVenkatesh Pallipadi } 157495235ca2SVenkatesh Pallipadi 15754d34a67dSDave Jones return ret_freq; 157695235ca2SVenkatesh Pallipadi } 157795235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 157895235ca2SVenkatesh Pallipadi 15793d737108SJesse Barnes /** 15803d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 15813d737108SJesse Barnes * @cpu: CPU number 15823d737108SJesse Barnes * 15833d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 15843d737108SJesse Barnes */ 15853d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 15863d737108SJesse Barnes { 15873d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15883d737108SJesse Barnes unsigned int ret_freq = 0; 15893d737108SJesse Barnes 15903d737108SJesse Barnes if (policy) { 15913d737108SJesse Barnes ret_freq = policy->max; 15923d737108SJesse Barnes cpufreq_cpu_put(policy); 15933d737108SJesse Barnes } 15943d737108SJesse Barnes 15953d737108SJesse Barnes return ret_freq; 15963d737108SJesse Barnes } 15973d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 15983d737108SJesse Barnes 1599d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 16001da177e4SLinus Torvalds { 1601e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 16021da177e4SLinus Torvalds 16031c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 16044d34a67dSDave Jones return ret_freq; 16051da177e4SLinus Torvalds 1606d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 16071da177e4SLinus Torvalds 160811e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 160911e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) 161011e584cfSViresh Kumar return ret_freq; 161111e584cfSViresh Kumar 1612e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 16131c3d85ddSRafael J. 
Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1614e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1615e08f5f5bSGautham R Shenoy saved value exists */ 1616e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1617a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 16181da177e4SLinus Torvalds schedule_work(&policy->update); 16191da177e4SLinus Torvalds } 16201da177e4SLinus Torvalds } 16211da177e4SLinus Torvalds 16224d34a67dSDave Jones return ret_freq; 16235a01f2e8SVenkatesh Pallipadi } 16241da177e4SLinus Torvalds 16255a01f2e8SVenkatesh Pallipadi /** 16265a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 16275a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 16285a01f2e8SVenkatesh Pallipadi * 16295a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16305a01f2e8SVenkatesh Pallipadi */ 16315a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16325a01f2e8SVenkatesh Pallipadi { 1633999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16345a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16355a01f2e8SVenkatesh Pallipadi 1636999976e0SAaron Plattner if (policy) { 1637ad7722daSviresh kumar down_read(&policy->rwsem); 1638d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1639ad7722daSviresh kumar up_read(&policy->rwsem); 1640999976e0SAaron Plattner 1641999976e0SAaron Plattner cpufreq_cpu_put(policy); 1642999976e0SAaron Plattner } 16436eed9404SViresh Kumar 16444d34a67dSDave Jones return ret_freq; 16451da177e4SLinus Torvalds } 16461da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 16471da177e4SLinus Torvalds 16488a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 16498a25a2fdSKay Sievers .name = "cpufreq", 16508a25a2fdSKay Sievers .subsys = &cpu_subsys, 16518a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 16528a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1653e00e56dfSRafael J. Wysocki }; 1654e00e56dfSRafael J. Wysocki 1655e28867eaSViresh Kumar /* 1656e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1657e28867eaSViresh Kumar * during suspend.. 165842d4dc3fSBenjamin Herrenschmidt */ 1659e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 166042d4dc3fSBenjamin Herrenschmidt { 1661e28867eaSViresh Kumar int ret; 16624bc5d341SDave Jones 1663e28867eaSViresh Kumar if (!policy->suspend_freq) { 1664e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1665e28867eaSViresh Kumar return -EINVAL; 166642d4dc3fSBenjamin Herrenschmidt } 166742d4dc3fSBenjamin Herrenschmidt 1668e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1669e28867eaSViresh Kumar policy->suspend_freq); 1670e28867eaSViresh Kumar 1671e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1672e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1673e28867eaSViresh Kumar if (ret) 1674e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. 
err: %d\n", 1675e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1676e28867eaSViresh Kumar 1677c9060494SDave Jones return ret; 167842d4dc3fSBenjamin Herrenschmidt } 1679e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 168042d4dc3fSBenjamin Herrenschmidt 168142d4dc3fSBenjamin Herrenschmidt /** 16822f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 16831da177e4SLinus Torvalds * 16842f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 16852f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 16862f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 16872f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 16881da177e4SLinus Torvalds */ 16892f0aea93SViresh Kumar void cpufreq_suspend(void) 16901da177e4SLinus Torvalds { 16913a3e9e06SViresh Kumar struct cpufreq_policy *policy; 16921da177e4SLinus Torvalds 16932f0aea93SViresh Kumar if (!cpufreq_driver) 1694e00e56dfSRafael J. Wysocki return; 16951da177e4SLinus Torvalds 16962f0aea93SViresh Kumar if (!has_target()) 1697b1b12babSViresh Kumar goto suspend; 16981da177e4SLinus Torvalds 16992f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 17002f0aea93SViresh Kumar 1701f963735aSViresh Kumar for_each_active_policy(policy) { 17022f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 17032f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 17042f0aea93SViresh Kumar __func__, policy); 17052f0aea93SViresh Kumar else if (cpufreq_driver->suspend 17062f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 17072f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 17082f0aea93SViresh Kumar policy); 17091da177e4SLinus Torvalds } 1710b1b12babSViresh Kumar 1711b1b12babSViresh Kumar suspend: 1712b1b12babSViresh Kumar cpufreq_suspended = true; 17131da177e4SLinus Torvalds } 17141da177e4SLinus Torvalds 17151da177e4SLinus Torvalds /** 17162f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 17171da177e4SLinus Torvalds * 17182f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 17192f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 
17201da177e4SLinus Torvalds */ 17212f0aea93SViresh Kumar void cpufreq_resume(void) 17221da177e4SLinus Torvalds { 17231da177e4SLinus Torvalds struct cpufreq_policy *policy; 17241da177e4SLinus Torvalds 17252f0aea93SViresh Kumar if (!cpufreq_driver) 17261da177e4SLinus Torvalds return; 17271da177e4SLinus Torvalds 17288e30444eSLan Tianyu cpufreq_suspended = false; 17298e30444eSLan Tianyu 17302f0aea93SViresh Kumar if (!has_target()) 17312f0aea93SViresh Kumar return; 17321da177e4SLinus Torvalds 17332f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17342f0aea93SViresh Kumar 1735f963735aSViresh Kumar for_each_active_policy(policy) { 17360c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17370c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 17380c5aa405SViresh Kumar policy); 17390c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 17402f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 17412f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 17422f0aea93SViresh Kumar __func__, policy); 1743c75de0acSViresh Kumar } 17442f0aea93SViresh Kumar 17452f0aea93SViresh Kumar /* 1746c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1747c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1748c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 17492f0aea93SViresh Kumar */ 1750c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1751c75de0acSViresh Kumar if (WARN_ON(!policy)) 1752c75de0acSViresh Kumar return; 1753c75de0acSViresh Kumar 17543a3e9e06SViresh Kumar schedule_work(&policy->update); 17551da177e4SLinus Torvalds } 17561da177e4SLinus Torvalds 17579d95046eSBorislav Petkov /** 17589d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 17599d95046eSBorislav Petkov * 17609d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 17619d95046eSBorislav Petkov * or NULL, if none. 17629d95046eSBorislav Petkov */ 17639d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 17649d95046eSBorislav Petkov { 17651c3d85ddSRafael J. Wysocki if (cpufreq_driver) 17661c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 17671c3d85ddSRafael J. Wysocki 17681c3d85ddSRafael J. Wysocki return NULL; 17699d95046eSBorislav Petkov } 17709d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 17711da177e4SLinus Torvalds 177251315cdfSThomas Petazzoni /** 177351315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 177451315cdfSThomas Petazzoni * 177551315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 177651315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 
177751315cdfSThomas Petazzoni */ 177851315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 177951315cdfSThomas Petazzoni { 178051315cdfSThomas Petazzoni if (cpufreq_driver) 178151315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 178251315cdfSThomas Petazzoni 178351315cdfSThomas Petazzoni return NULL; 178451315cdfSThomas Petazzoni } 178551315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 178651315cdfSThomas Petazzoni 17871da177e4SLinus Torvalds /********************************************************************* 17881da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 17891da177e4SLinus Torvalds *********************************************************************/ 17901da177e4SLinus Torvalds 17911da177e4SLinus Torvalds /** 17921da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 17931da177e4SLinus Torvalds * @nb: notifier function to register 17941da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 17951da177e4SLinus Torvalds * 17961da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 17971da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 17981da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 17991da177e4SLinus Torvalds * changes in cpufreq policy. 18001da177e4SLinus Torvalds * 18011da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1802e041c683SAlan Stern * blocking_notifier_chain_register. 18031da177e4SLinus Torvalds */ 18041da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 18051da177e4SLinus Torvalds { 18061da177e4SLinus Torvalds int ret; 18071da177e4SLinus Torvalds 1808d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1809d5aaffa9SDirk Brandewie return -EINVAL; 1810d5aaffa9SDirk Brandewie 181174212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 181274212ca4SCesar Eduardo Barros 18131da177e4SLinus Torvalds switch (list) { 18141da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1815b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1816e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18171da177e4SLinus Torvalds break; 18181da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1819e041c683SAlan Stern ret = blocking_notifier_chain_register( 1820e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18211da177e4SLinus Torvalds break; 18221da177e4SLinus Torvalds default: 18231da177e4SLinus Torvalds ret = -EINVAL; 18241da177e4SLinus Torvalds } 18251da177e4SLinus Torvalds 18261da177e4SLinus Torvalds return ret; 18271da177e4SLinus Torvalds } 18281da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18291da177e4SLinus Torvalds 18301da177e4SLinus Torvalds /** 18311da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18321da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18331da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18341da177e4SLinus Torvalds * 18351da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18361da177e4SLinus Torvalds * 18371da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1838e041c683SAlan Stern * blocking_notifier_chain_unregister. 
18391da177e4SLinus Torvalds */ 18401da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 18411da177e4SLinus Torvalds { 18421da177e4SLinus Torvalds int ret; 18431da177e4SLinus Torvalds 1844d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1845d5aaffa9SDirk Brandewie return -EINVAL; 1846d5aaffa9SDirk Brandewie 18471da177e4SLinus Torvalds switch (list) { 18481da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1849b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1850e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18511da177e4SLinus Torvalds break; 18521da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1853e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1854e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18551da177e4SLinus Torvalds break; 18561da177e4SLinus Torvalds default: 18571da177e4SLinus Torvalds ret = -EINVAL; 18581da177e4SLinus Torvalds } 18591da177e4SLinus Torvalds 18601da177e4SLinus Torvalds return ret; 18611da177e4SLinus Torvalds } 18621da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 18631da177e4SLinus Torvalds 18641da177e4SLinus Torvalds 18651da177e4SLinus Torvalds /********************************************************************* 18661da177e4SLinus Torvalds * GOVERNORS * 18671da177e4SLinus Torvalds *********************************************************************/ 18681da177e4SLinus Torvalds 18691c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 18701c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 18711c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 18721c03a2d0SViresh Kumar { 18731c03a2d0SViresh Kumar int ret; 18741c03a2d0SViresh Kumar 18751c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 18761c03a2d0SViresh Kumar 18771c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 18781c03a2d0SViresh Kumar if (!freqs->new) 18791c03a2d0SViresh Kumar return 0; 18801c03a2d0SViresh Kumar 18811c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 18821c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 18831c03a2d0SViresh Kumar 18841c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 18851c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 18861c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 18871c03a2d0SViresh Kumar 18881c03a2d0SViresh Kumar if (ret) 18891c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 18901c03a2d0SViresh Kumar __func__, ret); 18911c03a2d0SViresh Kumar 18921c03a2d0SViresh Kumar return ret; 18931c03a2d0SViresh Kumar } 18941c03a2d0SViresh Kumar 18958d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 18968d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 18978d65775dSViresh Kumar { 18981c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 18991c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 19008d65775dSViresh Kumar int retval = -EINVAL; 19018d65775dSViresh Kumar bool notify; 19028d65775dSViresh Kumar 19038d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 19048d65775dSViresh Kumar if (notify) { 19051c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 19061c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 
19071c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 19081c03a2d0SViresh Kumar if (retval) 19091c03a2d0SViresh Kumar return retval; 19108d65775dSViresh Kumar 19111c03a2d0SViresh Kumar intermediate_freq = freqs.new; 19121c03a2d0SViresh Kumar /* Set old freq to intermediate */ 19131c03a2d0SViresh Kumar if (intermediate_freq) 19141c03a2d0SViresh Kumar freqs.old = freqs.new; 19151c03a2d0SViresh Kumar } 19161c03a2d0SViresh Kumar 19171c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 19188d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 19198d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 19208d65775dSViresh Kumar 19218d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19228d65775dSViresh Kumar } 19238d65775dSViresh Kumar 19248d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 19258d65775dSViresh Kumar if (retval) 19268d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 19278d65775dSViresh Kumar retval); 19288d65775dSViresh Kumar 19291c03a2d0SViresh Kumar if (notify) { 19308d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 19318d65775dSViresh Kumar 19321c03a2d0SViresh Kumar /* 19331c03a2d0SViresh Kumar * Failed after setting to intermediate freq? Driver should have 19341c03a2d0SViresh Kumar * reverted back to initial frequency and so should we. Check 19351c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 193658405af6SShailendra Verma * case we haven't switched to intermediate freq at all. 19371c03a2d0SViresh Kumar */ 19381c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 19391c03a2d0SViresh Kumar freqs.old = intermediate_freq; 19401c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 19411c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19421c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 19431c03a2d0SViresh Kumar } 19441c03a2d0SViresh Kumar } 19451c03a2d0SViresh Kumar 19468d65775dSViresh Kumar return retval; 19478d65775dSViresh Kumar } 19488d65775dSViresh Kumar 19491da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 19501da177e4SLinus Torvalds unsigned int target_freq, 19511da177e4SLinus Torvalds unsigned int relation) 19521da177e4SLinus Torvalds { 19537249924eSViresh Kumar unsigned int old_target_freq = target_freq; 19548d65775dSViresh Kumar int retval = -EINVAL; 1955c32b6b8eSAshok Raj 1956a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 1957a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 1958a7b422cdSKonrad Rzeszutek Wilk 19597249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 19607249924eSViresh Kumar if (target_freq > policy->max) 19617249924eSViresh Kumar target_freq = policy->max; 19627249924eSViresh Kumar if (target_freq < policy->min) 19637249924eSViresh Kumar target_freq = policy->min; 19647249924eSViresh Kumar 19657249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 19667249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 19675a1c0228SViresh Kumar 19689c0ebcf7SViresh Kumar /* 19699c0ebcf7SViresh Kumar * This might look like a redundant call as we are checking it again 19709c0ebcf7SViresh Kumar * after finding index. But it is left intentionally for cases where 19719c0ebcf7SViresh Kumar * exactly same freq is called again and so we can save on few function 19729c0ebcf7SViresh Kumar * calls. 
19739c0ebcf7SViresh Kumar */ 19745a1c0228SViresh Kumar if (target_freq == policy->cur) 19755a1c0228SViresh Kumar return 0; 19765a1c0228SViresh Kumar 19771c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 19781c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 19791c03a2d0SViresh Kumar 19801c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 19811c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 19829c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 19839c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 19849c0ebcf7SViresh Kumar int index; 198590d45d17SAshok Raj 19869c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 19879c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 19889c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 19899c0ebcf7SViresh Kumar goto out; 19909c0ebcf7SViresh Kumar } 19919c0ebcf7SViresh Kumar 19929c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 19939c0ebcf7SViresh Kumar target_freq, relation, &index); 19949c0ebcf7SViresh Kumar if (unlikely(retval)) { 19959c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 19969c0ebcf7SViresh Kumar goto out; 19979c0ebcf7SViresh Kumar } 19989c0ebcf7SViresh Kumar 1999d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 20009c0ebcf7SViresh Kumar retval = 0; 2001d4019f0aSViresh Kumar goto out; 2002d4019f0aSViresh Kumar } 2003d4019f0aSViresh Kumar 20048d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 20059c0ebcf7SViresh Kumar } 20069c0ebcf7SViresh Kumar 20079c0ebcf7SViresh Kumar out: 20081da177e4SLinus Torvalds return retval; 20091da177e4SLinus Torvalds } 20101da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 20111da177e4SLinus Torvalds 20121da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 20131da177e4SLinus Torvalds unsigned int target_freq, 20141da177e4SLinus Torvalds unsigned int relation) 20151da177e4SLinus Torvalds { 2016f1829e4aSJulia Lawall int ret = -EINVAL; 20171da177e4SLinus Torvalds 2018ad7722daSviresh kumar down_write(&policy->rwsem); 20191da177e4SLinus Torvalds 20201da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 20211da177e4SLinus Torvalds 2022ad7722daSviresh kumar up_write(&policy->rwsem); 20231da177e4SLinus Torvalds 20241da177e4SLinus Torvalds return ret; 20251da177e4SLinus Torvalds } 20261da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 20271da177e4SLinus Torvalds 2028e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 2029e08f5f5bSGautham R Shenoy unsigned int event) 20301da177e4SLinus Torvalds { 2031cc993cabSDave Jones int ret; 20326afde10cSThomas Renninger 20336afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 20346afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 
20356afde10cSThomas Renninger That this is the case is already ensured in Kconfig 20366afde10cSThomas Renninger */ 20376afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 20386afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 20396afde10cSThomas Renninger #else 20406afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 20416afde10cSThomas Renninger #endif 20421c256245SThomas Renninger 20432f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 20442f0aea93SViresh Kumar if (cpufreq_suspended) 20452f0aea93SViresh Kumar return 0; 2046cb57720bSEthan Zhao /* 2047cb57720bSEthan Zhao * Governor might not be initiated here if ACPI _PPC changed 2048cb57720bSEthan Zhao * notification happened, so check it. 2049cb57720bSEthan Zhao */ 2050cb57720bSEthan Zhao if (!policy->governor) 2051cb57720bSEthan Zhao return -EINVAL; 20522f0aea93SViresh Kumar 20531c256245SThomas Renninger if (policy->governor->max_transition_latency && 20541c256245SThomas Renninger policy->cpuinfo.transition_latency > 20551c256245SThomas Renninger policy->governor->max_transition_latency) { 20566afde10cSThomas Renninger if (!gov) 20576afde10cSThomas Renninger return -EINVAL; 20586afde10cSThomas Renninger else { 2059e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 2060e837f9b5SJoe Perches policy->governor->name, gov->name); 20611c256245SThomas Renninger policy->governor = gov; 20621c256245SThomas Renninger } 20636afde10cSThomas Renninger } 20641da177e4SLinus Torvalds 2065fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20661da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 20671da177e4SLinus Torvalds return -EINVAL; 20681da177e4SLinus Torvalds 20692d06d8c4SDominik Brodowski pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2070e08f5f5bSGautham R Shenoy policy->cpu, event); 207195731ebbSXiaoguang Chen 207295731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 207356d07db2SSrivatsa S. 
Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2074f73d3933SViresh Kumar || (!policy->governor_enabled 2075f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 207695731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 207795731ebbSXiaoguang Chen return -EBUSY; 207895731ebbSXiaoguang Chen } 207995731ebbSXiaoguang Chen 208095731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 208195731ebbSXiaoguang Chen policy->governor_enabled = false; 208295731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 208395731ebbSXiaoguang Chen policy->governor_enabled = true; 208495731ebbSXiaoguang Chen 208595731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 208695731ebbSXiaoguang Chen 20871da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 20881da177e4SLinus Torvalds 20894d5dcc42SViresh Kumar if (!ret) { 20904d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20918e53695fSViresh Kumar policy->governor->initialized++; 20924d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 20938e53695fSViresh Kumar policy->governor->initialized--; 209495731ebbSXiaoguang Chen } else { 209595731ebbSXiaoguang Chen /* Restore original values */ 209695731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 209795731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 209895731ebbSXiaoguang Chen policy->governor_enabled = true; 209995731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 210095731ebbSXiaoguang Chen policy->governor_enabled = false; 210195731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 21024d5dcc42SViresh Kumar } 2103b394058fSViresh Kumar 2104fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2105fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 21061da177e4SLinus Torvalds module_put(policy->governor->owner); 21071da177e4SLinus Torvalds 21081da177e4SLinus Torvalds return ret; 21091da177e4SLinus Torvalds } 21101da177e4SLinus Torvalds 21111da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 21121da177e4SLinus Torvalds { 21133bcb09a3SJeremy Fitzhardinge int err; 21141da177e4SLinus Torvalds 21151da177e4SLinus Torvalds if (!governor) 21161da177e4SLinus Torvalds return -EINVAL; 21171da177e4SLinus Torvalds 2118a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2119a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2120a7b422cdSKonrad Rzeszutek Wilk 21213fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21221da177e4SLinus Torvalds 2123b394058fSViresh Kumar governor->initialized = 0; 21243bcb09a3SJeremy Fitzhardinge err = -EBUSY; 212542f91fa1SViresh Kumar if (!find_governor(governor->name)) { 21263bcb09a3SJeremy Fitzhardinge err = 0; 21271da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 21283bcb09a3SJeremy Fitzhardinge } 21291da177e4SLinus Torvalds 21303fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21313bcb09a3SJeremy Fitzhardinge return err; 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 21341da177e4SLinus Torvalds 21351da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 21361da177e4SLinus Torvalds { 21374573237bSViresh Kumar struct cpufreq_policy *policy; 21384573237bSViresh Kumar unsigned long flags; 213990e41bacSPrarit Bhargava 21401da177e4SLinus Torvalds if (!governor) 21411da177e4SLinus Torvalds return; 21421da177e4SLinus Torvalds 2143a7b422cdSKonrad Rzeszutek 
Wilk if (cpufreq_disabled()) 2144a7b422cdSKonrad Rzeszutek Wilk return; 2145a7b422cdSKonrad Rzeszutek Wilk 21464573237bSViresh Kumar /* clear last_governor for all inactive policies */ 21474573237bSViresh Kumar read_lock_irqsave(&cpufreq_driver_lock, flags); 21484573237bSViresh Kumar for_each_inactive_policy(policy) { 214918bf3a12SViresh Kumar if (!strcmp(policy->last_governor, governor->name)) { 215018bf3a12SViresh Kumar policy->governor = NULL; 21514573237bSViresh Kumar strcpy(policy->last_governor, "\0"); 215290e41bacSPrarit Bhargava } 215318bf3a12SViresh Kumar } 21544573237bSViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 215590e41bacSPrarit Bhargava 21563fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21571da177e4SLinus Torvalds list_del(&governor->governor_list); 21583fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21591da177e4SLinus Torvalds return; 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 21621da177e4SLinus Torvalds 21631da177e4SLinus Torvalds 21641da177e4SLinus Torvalds /********************************************************************* 21651da177e4SLinus Torvalds * POLICY INTERFACE * 21661da177e4SLinus Torvalds *********************************************************************/ 21671da177e4SLinus Torvalds 21681da177e4SLinus Torvalds /** 21691da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 217029464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 217129464f28SDave Jones * is written 21721da177e4SLinus Torvalds * 21731da177e4SLinus Torvalds * Reads the current cpufreq policy. 21741da177e4SLinus Torvalds */ 21751da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 21761da177e4SLinus Torvalds { 21771da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 21781da177e4SLinus Torvalds if (!policy) 21791da177e4SLinus Torvalds return -EINVAL; 21801da177e4SLinus Torvalds 21811da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 21821da177e4SLinus Torvalds if (!cpu_policy) 21831da177e4SLinus Torvalds return -EINVAL; 21841da177e4SLinus Torvalds 2185d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 21861da177e4SLinus Torvalds 21871da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 21881da177e4SLinus Torvalds return 0; 21891da177e4SLinus Torvalds } 21901da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 21911da177e4SLinus Torvalds 2192153d7f3fSArjan van de Ven /* 2193037ce839SViresh Kumar * policy : current policy. 2194037ce839SViresh Kumar * new_policy: policy to be set. 2195153d7f3fSArjan van de Ven */ 2196037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 21973a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 21981da177e4SLinus Torvalds { 2199d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2200d9a789c7SRafael J. Wysocki int ret; 22011da177e4SLinus Torvalds 2202e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2203e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 22041da177e4SLinus Torvalds 2205d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 22061da177e4SLinus Torvalds 2207d9a789c7SRafael J. Wysocki if (new_policy->min > policy->max || new_policy->max < policy->min) 2208d9a789c7SRafael J. 
Wysocki return -EINVAL; 22099c9a43edSMattia Dongili 22101da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 22113a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 22121da177e4SLinus Torvalds if (ret) 2213d9a789c7SRafael J. Wysocki return ret; 22141da177e4SLinus Torvalds 22151da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2216e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22173a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 22181da177e4SLinus Torvalds 22191da177e4SLinus Torvalds /* adjust if necessary - hardware incompatibility*/ 2220e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22213a3e9e06SViresh Kumar CPUFREQ_INCOMPATIBLE, new_policy); 22221da177e4SLinus Torvalds 2223bb176f7dSViresh Kumar /* 2224bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2225bb176f7dSViresh Kumar * different to the first one 2226bb176f7dSViresh Kumar */ 22273a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2228e041c683SAlan Stern if (ret) 2229d9a789c7SRafael J. Wysocki return ret; 22301da177e4SLinus Torvalds 22311da177e4SLinus Torvalds /* notification of the new policy */ 2232e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22333a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 22341da177e4SLinus Torvalds 22353a3e9e06SViresh Kumar policy->min = new_policy->min; 22363a3e9e06SViresh Kumar policy->max = new_policy->max; 22371da177e4SLinus Torvalds 22382d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 22393a3e9e06SViresh Kumar policy->min, policy->max); 22401da177e4SLinus Torvalds 22411c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 22423a3e9e06SViresh Kumar policy->policy = new_policy->policy; 22432d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2244d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2245d9a789c7SRafael J. Wysocki } 2246d9a789c7SRafael J. Wysocki 2247d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2248d9a789c7SRafael J. Wysocki goto out; 22491da177e4SLinus Torvalds 22502d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 22511da177e4SLinus Torvalds 2252d9a789c7SRafael J. Wysocki /* save old, working values */ 2253d9a789c7SRafael J. Wysocki old_gov = policy->governor; 22541da177e4SLinus Torvalds /* end old governor */ 2255d9a789c7SRafael J. 
Wysocki if (old_gov) { 22564bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 22574bc384aeSViresh Kumar if (ret) { 22584bc384aeSViresh Kumar /* This can happen due to race with other operations */ 22594bc384aeSViresh Kumar pr_debug("%s: Failed to Stop Governor: %s (%d)\n", 22604bc384aeSViresh Kumar __func__, old_gov->name, ret); 22614bc384aeSViresh Kumar return ret; 22624bc384aeSViresh Kumar } 22634bc384aeSViresh Kumar 2264ad7722daSviresh kumar up_write(&policy->rwsem); 22654bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2266ad7722daSviresh kumar down_write(&policy->rwsem); 22674bc384aeSViresh Kumar 22684bc384aeSViresh Kumar if (ret) { 22694bc384aeSViresh Kumar pr_err("%s: Failed to Exit Governor: %s (%d)\n", 22704bc384aeSViresh Kumar __func__, old_gov->name, ret); 22714bc384aeSViresh Kumar return ret; 22724bc384aeSViresh Kumar } 22737bd353a9SViresh Kumar } 22741da177e4SLinus Torvalds 22751da177e4SLinus Torvalds /* start new governor */ 22763a3e9e06SViresh Kumar policy->governor = new_policy->governor; 22774bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 22784bc384aeSViresh Kumar if (!ret) { 22794bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 22804bc384aeSViresh Kumar if (!ret) 2281d9a789c7SRafael J. Wysocki goto out; 2282d9a789c7SRafael J. Wysocki 2283ad7722daSviresh kumar up_write(&policy->rwsem); 2284d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2285ad7722daSviresh kumar down_write(&policy->rwsem); 2286955ef483SViresh Kumar } 22877bd353a9SViresh Kumar 22881da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2289d9a789c7SRafael J. Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 22901da177e4SLinus Torvalds if (old_gov) { 22913a3e9e06SViresh Kumar policy->governor = old_gov; 22924bc384aeSViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) 22934bc384aeSViresh Kumar policy->governor = NULL; 22944bc384aeSViresh Kumar else 2295d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 22961da177e4SLinus Torvalds } 22971da177e4SLinus Torvalds 22984bc384aeSViresh Kumar return ret; 2299d9a789c7SRafael J. Wysocki 2300d9a789c7SRafael J. Wysocki out: 2301d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2302d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 23031da177e4SLinus Torvalds } 23041da177e4SLinus Torvalds 23051da177e4SLinus Torvalds /** 23061da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 23071da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 23081da177e4SLinus Torvalds * 230925985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 23101da177e4SLinus Torvalds * at different times. 
23111da177e4SLinus Torvalds */ 23121da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 23131da177e4SLinus Torvalds { 23143a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 23153a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2316f1829e4aSJulia Lawall int ret; 23171da177e4SLinus Torvalds 2318fefa8ff8SAaron Plattner if (!policy) 2319fefa8ff8SAaron Plattner return -ENODEV; 23201da177e4SLinus Torvalds 2321ad7722daSviresh kumar down_write(&policy->rwsem); 23221da177e4SLinus Torvalds 23232d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2324d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 23253a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 23263a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 23273a3e9e06SViresh Kumar new_policy.policy = policy->user_policy.policy; 23283a3e9e06SViresh Kumar new_policy.governor = policy->user_policy.governor; 23291da177e4SLinus Torvalds 2330bb176f7dSViresh Kumar /* 2331bb176f7dSViresh Kumar * BIOS might change freq behind our back 2332bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2333bb176f7dSViresh Kumar */ 23342ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 23353a3e9e06SViresh Kumar new_policy.cur = cpufreq_driver->get(cpu); 2336bd0fa9bbSViresh Kumar if (WARN_ON(!new_policy.cur)) { 2337bd0fa9bbSViresh Kumar ret = -EIO; 2338fefa8ff8SAaron Plattner goto unlock; 2339bd0fa9bbSViresh Kumar } 2340bd0fa9bbSViresh Kumar 23413a3e9e06SViresh Kumar if (!policy->cur) { 2342e837f9b5SJoe Perches pr_debug("Driver did not initialize current freq\n"); 23433a3e9e06SViresh Kumar policy->cur = new_policy.cur; 2344a85f7bd3SThomas Renninger } else { 23459c0ebcf7SViresh Kumar if (policy->cur != new_policy.cur && has_target()) 2346a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, new_policy.cur); 23470961dd0dSThomas Renninger } 2348a85f7bd3SThomas Renninger } 23490961dd0dSThomas Renninger 2350037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 23511da177e4SLinus Torvalds 2352fefa8ff8SAaron Plattner unlock: 2353ad7722daSviresh kumar up_write(&policy->rwsem); 23545a01f2e8SVenkatesh Pallipadi 23553a3e9e06SViresh Kumar cpufreq_cpu_put(policy); 23561da177e4SLinus Torvalds return ret; 23571da177e4SLinus Torvalds } 23581da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy); 23591da177e4SLinus Torvalds 23602760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb, 2361c32b6b8eSAshok Raj unsigned long action, void *hcpu) 2362c32b6b8eSAshok Raj { 2363c32b6b8eSAshok Raj unsigned int cpu = (unsigned long)hcpu; 23648a25a2fdSKay Sievers struct device *dev; 2365c32b6b8eSAshok Raj 23668a25a2fdSKay Sievers dev = get_cpu_device(cpu); 23678a25a2fdSKay Sievers if (dev) { 23685302c3fbSSrivatsa S. Bhat switch (action & ~CPU_TASKS_FROZEN) { 2369c32b6b8eSAshok Raj case CPU_ONLINE: 237023faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2371c32b6b8eSAshok Raj break; 23725302c3fbSSrivatsa S. Bhat 2373c32b6b8eSAshok Raj case CPU_DOWN_PREPARE: 2374*15c0b4d2SRafael J. Wysocki cpufreq_offline_prepare(cpu); 23751aee40acSSrivatsa S. Bhat break; 23761aee40acSSrivatsa S. Bhat 23771aee40acSSrivatsa S. Bhat case CPU_POST_DEAD: 2378*15c0b4d2SRafael J. Wysocki cpufreq_offline_finish(cpu); 2379c32b6b8eSAshok Raj break; 23805302c3fbSSrivatsa S. 
Bhat 23815a01f2e8SVenkatesh Pallipadi case CPU_DOWN_FAILED: 238223faf0b7SViresh Kumar cpufreq_add_dev(dev, NULL); 2383c32b6b8eSAshok Raj break; 2384c32b6b8eSAshok Raj } 2385c32b6b8eSAshok Raj } 2386c32b6b8eSAshok Raj return NOTIFY_OK; 2387c32b6b8eSAshok Raj } 2388c32b6b8eSAshok Raj 23899c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = { 2390c32b6b8eSAshok Raj .notifier_call = cpufreq_cpu_callback, 2391c32b6b8eSAshok Raj }; 23921da177e4SLinus Torvalds 23931da177e4SLinus Torvalds /********************************************************************* 23946f19efc0SLukasz Majewski * BOOST * 23956f19efc0SLukasz Majewski *********************************************************************/ 23966f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state) 23976f19efc0SLukasz Majewski { 23986f19efc0SLukasz Majewski struct cpufreq_frequency_table *freq_table; 23996f19efc0SLukasz Majewski struct cpufreq_policy *policy; 24006f19efc0SLukasz Majewski int ret = -EINVAL; 24016f19efc0SLukasz Majewski 2402f963735aSViresh Kumar for_each_active_policy(policy) { 24036f19efc0SLukasz Majewski freq_table = cpufreq_frequency_get_table(policy->cpu); 24046f19efc0SLukasz Majewski if (freq_table) { 24056f19efc0SLukasz Majewski ret = cpufreq_frequency_table_cpuinfo(policy, 24066f19efc0SLukasz Majewski freq_table); 24076f19efc0SLukasz Majewski if (ret) { 24086f19efc0SLukasz Majewski pr_err("%s: Policy frequency update failed\n", 24096f19efc0SLukasz Majewski __func__); 24106f19efc0SLukasz Majewski break; 24116f19efc0SLukasz Majewski } 24126f19efc0SLukasz Majewski policy->user_policy.max = policy->max; 24136f19efc0SLukasz Majewski __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 24146f19efc0SLukasz Majewski } 24156f19efc0SLukasz Majewski } 24166f19efc0SLukasz Majewski 24176f19efc0SLukasz Majewski return ret; 24186f19efc0SLukasz Majewski } 24196f19efc0SLukasz Majewski 24206f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state) 24216f19efc0SLukasz Majewski { 24226f19efc0SLukasz Majewski unsigned long flags; 24236f19efc0SLukasz Majewski int ret = 0; 24246f19efc0SLukasz Majewski 24256f19efc0SLukasz Majewski if (cpufreq_driver->boost_enabled == state) 24266f19efc0SLukasz Majewski return 0; 24276f19efc0SLukasz Majewski 24286f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24296f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = state; 24306f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24316f19efc0SLukasz Majewski 24326f19efc0SLukasz Majewski ret = cpufreq_driver->set_boost(state); 24336f19efc0SLukasz Majewski if (ret) { 24346f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24356f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = !state; 24366f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24376f19efc0SLukasz Majewski 2438e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST\n", 2439e837f9b5SJoe Perches __func__, state ? 
"enable" : "disable"); 24406f19efc0SLukasz Majewski } 24416f19efc0SLukasz Majewski 24426f19efc0SLukasz Majewski return ret; 24436f19efc0SLukasz Majewski } 24446f19efc0SLukasz Majewski 24456f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 24466f19efc0SLukasz Majewski { 24476f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 24486f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 24496f19efc0SLukasz Majewski 24506f19efc0SLukasz Majewski return 0; 24516f19efc0SLukasz Majewski } 24526f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 24536f19efc0SLukasz Majewski 24546f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 24556f19efc0SLukasz Majewski { 24566f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 24576f19efc0SLukasz Majewski } 24586f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 24596f19efc0SLukasz Majewski 24606f19efc0SLukasz Majewski /********************************************************************* 24611da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 24621da177e4SLinus Torvalds *********************************************************************/ 24631da177e4SLinus Torvalds 24641da177e4SLinus Torvalds /** 24651da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 24661da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 24671da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 24681da177e4SLinus Torvalds * 24691da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 24701da177e4SLinus Torvalds * returns zero on success, -EBUSY when another driver got here first 24711da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 24721da177e4SLinus Torvalds * 24731da177e4SLinus Torvalds */ 2474221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 24751da177e4SLinus Torvalds { 24761da177e4SLinus Torvalds unsigned long flags; 24771da177e4SLinus Torvalds int ret; 24781da177e4SLinus Torvalds 2479a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2480a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2481a7b422cdSKonrad Rzeszutek Wilk 24821da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 24839c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 24849832235fSRafael J. Wysocki driver_data->target) || 24859832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 24861c03a2d0SViresh Kumar driver_data->target)) || 24871c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 24881da177e4SLinus Torvalds return -EINVAL; 24891da177e4SLinus Torvalds 24902d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 24911da177e4SLinus Torvalds 24920d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 24931c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 24940d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24954dea5806SYinghai Lu return -EEXIST; 24961da177e4SLinus Torvalds } 24971c3d85ddSRafael J. 
Wysocki cpufreq_driver = driver_data; 24980d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24991da177e4SLinus Torvalds 2500bc68b7dfSViresh Kumar if (driver_data->setpolicy) 2501bc68b7dfSViresh Kumar driver_data->flags |= CPUFREQ_CONST_LOOPS; 2502bc68b7dfSViresh Kumar 25036f19efc0SLukasz Majewski if (cpufreq_boost_supported()) { 25046f19efc0SLukasz Majewski /* 25056f19efc0SLukasz Majewski * Check if driver provides function to enable boost - 25066f19efc0SLukasz Majewski * if not, use cpufreq_boost_set_sw as default 25076f19efc0SLukasz Majewski */ 25086f19efc0SLukasz Majewski if (!cpufreq_driver->set_boost) 25096f19efc0SLukasz Majewski cpufreq_driver->set_boost = cpufreq_boost_set_sw; 25106f19efc0SLukasz Majewski 25116f19efc0SLukasz Majewski ret = cpufreq_sysfs_create_file(&boost.attr); 25126f19efc0SLukasz Majewski if (ret) { 25136f19efc0SLukasz Majewski pr_err("%s: cannot register global BOOST sysfs file\n", 25146f19efc0SLukasz Majewski __func__); 25156f19efc0SLukasz Majewski goto err_null_driver; 25166f19efc0SLukasz Majewski } 25176f19efc0SLukasz Majewski } 25186f19efc0SLukasz Majewski 25198a25a2fdSKay Sievers ret = subsys_interface_register(&cpufreq_interface); 25208f5bc2abSJiri Slaby if (ret) 25216f19efc0SLukasz Majewski goto err_boost_unreg; 25221da177e4SLinus Torvalds 2523ce1bcfe9SViresh Kumar if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2524ce1bcfe9SViresh Kumar list_empty(&cpufreq_policy_list)) { 25251da177e4SLinus Torvalds /* if all ->init() calls failed, unregister */ 2526ce1bcfe9SViresh Kumar pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2527e08f5f5bSGautham R Shenoy driver_data->name); 25288a25a2fdSKay Sievers goto err_if_unreg; 25291da177e4SLinus Torvalds } 25301da177e4SLinus Torvalds 253165edc68cSChandra Seetharaman register_hotcpu_notifier(&cpufreq_cpu_notifier); 25322d06d8c4SDominik Brodowski pr_debug("driver %s up and running\n", driver_data->name); 25331da177e4SLinus Torvalds 25348f5bc2abSJiri Slaby return 0; 25358a25a2fdSKay Sievers err_if_unreg: 25368a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25376f19efc0SLukasz Majewski err_boost_unreg: 25386f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25396f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25408f5bc2abSJiri Slaby err_null_driver: 25410d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25421c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25430d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 25444d34a67dSDave Jones return ret; 25451da177e4SLinus Torvalds } 25461da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver); 25471da177e4SLinus Torvalds 25481da177e4SLinus Torvalds /** 25491da177e4SLinus Torvalds * cpufreq_unregister_driver - unregister the current CPUFreq driver 25501da177e4SLinus Torvalds * 25511da177e4SLinus Torvalds * Unregister the current CPUFreq driver. Only call this if you have 25521da177e4SLinus Torvalds * the right to do so, i.e. if you have succeeded in initialising before! 25531da177e4SLinus Torvalds * Returns zero if successful, and -EINVAL if the cpufreq_driver is 25541da177e4SLinus Torvalds * currently not initialised. 25551da177e4SLinus Torvalds */ 2556221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver) 25571da177e4SLinus Torvalds { 25581da177e4SLinus Torvalds unsigned long flags; 25591da177e4SLinus Torvalds 25601c3d85ddSRafael J. 
Wysocki if (!cpufreq_driver || (driver != cpufreq_driver)) 25611da177e4SLinus Torvalds return -EINVAL; 25621da177e4SLinus Torvalds 25632d06d8c4SDominik Brodowski pr_debug("unregistering driver %s\n", driver->name); 25641da177e4SLinus Torvalds 2565454d3a25SSebastian Andrzej Siewior /* Protect against concurrent cpu hotplug */ 2566454d3a25SSebastian Andrzej Siewior get_online_cpus(); 25678a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25686f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25696f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25706f19efc0SLukasz Majewski 257165edc68cSChandra Seetharaman unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 25721da177e4SLinus Torvalds 25730d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25746eed9404SViresh Kumar 25751c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25766eed9404SViresh Kumar 25770d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2578454d3a25SSebastian Andrzej Siewior put_online_cpus(); 25791da177e4SLinus Torvalds 25801da177e4SLinus Torvalds return 0; 25811da177e4SLinus Torvalds } 25821da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 25835a01f2e8SVenkatesh Pallipadi 258490de2a4aSDoug Anderson /* 258590de2a4aSDoug Anderson * Stop cpufreq at shutdown to make sure it isn't holding any locks 258690de2a4aSDoug Anderson * or mutexes when secondary CPUs are halted. 258790de2a4aSDoug Anderson */ 258890de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 258990de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 259090de2a4aSDoug Anderson }; 259190de2a4aSDoug Anderson 25925a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 25935a01f2e8SVenkatesh Pallipadi { 2594a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2595a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2596a7b422cdSKonrad Rzeszutek Wilk 25972361be23SViresh Kumar cpufreq_global_kobject = kobject_create(); 25988aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 25998aa84ad8SThomas Renninger 260090de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 260190de2a4aSDoug Anderson 26025a01f2e8SVenkatesh Pallipadi return 0; 26035a01f2e8SVenkatesh Pallipadi } 26045a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2605
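/*
 * Illustrative sketch only (not part of cpufreq.c): how a hypothetical
 * platform driver might use the register/unregister interface documented
 * above. Every "example_*" identifier, the two frequencies and the
 * 100000 ns transition latency are assumptions invented for this sketch;
 * a real driver would program its hardware in example_target_index().
 */
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/*
	 * A real driver would switch the hardware to
	 * example_freq_table[index].frequency here.
	 */
	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* Publish the table; the 100000 ns latency is a placeholder. */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	/* Stay registered even if all ->init() calls fail (see above). */
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpu_init,
};

static int __init example_cpufreq_register(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_register);

static void __exit example_cpufreq_unregister(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_unregister);

MODULE_LICENSE("GPL");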