/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}

/* Finds next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && !policy_is_inactive(policy) ?
		policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
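/*
 * Illustrative sketch (not part of the original file): how a governor-style
 * consumer might use get_cpu_idle_time() to estimate load over a sampling
 * window.  The helper name and the two "prev_*" bookkeeping variables below
 * are hypothetical; the dbs governors keep equivalent per-CPU state.
 *
 *	static unsigned int example_cpu_load(unsigned int cpu,
 *					     u64 *prev_idle, u64 *prev_wall)
 *	{
 *		u64 idle, wall;
 *		unsigned int idle_delta, wall_delta;
 *
 *		idle = get_cpu_idle_time(cpu, &wall, 0);
 *		idle_delta = (unsigned int)(idle - *prev_idle);
 *		wall_delta = (unsigned int)(wall - *prev_wall);
 *		*prev_idle = idle;
 *		*prev_wall = wall;
 *
 *		if (!wall_delta || wall_delta < idle_delta)
 *			return 0;
 *
 *		return 100 * (wall_delta - idle_delta) / wall_delta;
 *	}
 */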
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy, so a
 * corresponding call to cpufreq_cpu_put() is required to decrement it back.
 * If that cpufreq_cpu_put() call isn't made, the policy won't be freed,
 * as that depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
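/*
 * Illustrative sketch (not part of the original file): the reference-counted
 * access pattern expected from callers of cpufreq_cpu_get().  Every
 * successful get must be balanced by cpufreq_cpu_put(), otherwise the policy
 * kobject can never be released.  The function below is hypothetical.
 *
 *	static unsigned int example_read_cur_freq(unsigned int cpu)
 *	{
 *		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *		unsigned int freq = 0;
 *
 *		if (policy) {
 *			freq = policy->cur;
 *			cpufreq_cpu_put(policy);
 *		}
 *
 *		return freq;
 *	}
 */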
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
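/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of the two helpers above around an actual frequency change.  The core does
 * this bracketing for ordinary ->target_index() drivers; ASYNC_NOTIFICATION
 * drivers issue the calls themselves once the hardware finishes switching.
 * example_write_pll() below is a hypothetical hardware access helper.
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = target_freq,
 *	};
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = example_write_pll(target_freq);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */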
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
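/*
 * Illustrative sketch (not part of the original file): a driver-private
 * read-only attribute built with the same cpufreq_freq_attr_ro() helper used
 * below for the core attributes.  A driver exposes it by listing it in its
 * struct cpufreq_driver .attr array, and cpufreq_add_dev_interface() then
 * creates the sysfs file for every policy.  The attribute name and the
 * example_read_turbo_pct() helper are hypothetical.
 *
 *	static ssize_t show_example_turbo_pct(struct cpufreq_policy *policy,
 *					      char *buf)
 *	{
 *		return sprintf(buf, "%u\n", example_read_turbo_pct(policy->cpu));
 *	}
 *	cpufreq_freq_attr_ro(example_turbo_pct);
 *
 *	static struct freq_attr *example_attrs[] = {
 *		&example_turbo_pct,
 *		NULL,
 *	};
 *
 *	static struct cpufreq_driver example_driver = {
 *		...
 *		.attr = example_attrs,
 *	};
 */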
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);
unlock:
	put_online_cpus();
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		if (j == policy->kobj_cpu)
			continue;

		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		if (j == policy->kobj_cpu)
			continue;

		remove_cpu_dev_symlink(policy, j);
	}
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);
	struct cpufreq_policy *policy;
	int ret;

	if (WARN_ON(!dev))
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
				   "cpufreq");
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;

	/* Set this once on allocation */
	policy->kobj_cpu = cpu;

	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

Wysocki pr_debug("%s: bringing CPU%u online\n", __func__, cpu); 11386eed9404SViresh Kumar 1139bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */ 11409104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 114111ce707eSRafael J. Wysocki if (policy) { 11429104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 114311ce707eSRafael J. Wysocki if (!policy_is_inactive(policy)) 1144d9612a49SRafael J. Wysocki return cpufreq_add_policy_cpu(policy, cpu); 11451da177e4SLinus Torvalds 114611ce707eSRafael J. Wysocki /* This is the only online CPU for the policy. Start over. */ 1147194d99c7SRafael J. Wysocki new_policy = false; 114811ce707eSRafael J. Wysocki down_write(&policy->rwsem); 114911ce707eSRafael J. Wysocki policy->cpu = cpu; 115011ce707eSRafael J. Wysocki policy->governor = NULL; 115111ce707eSRafael J. Wysocki up_write(&policy->rwsem); 115211ce707eSRafael J. Wysocki } else { 1153194d99c7SRafael J. Wysocki new_policy = true; 1154a34e63b1SRafael J. Wysocki policy = cpufreq_policy_alloc(cpu); 1155059019a3SDave Jones if (!policy) 1156d4d854d6SRafael J. Wysocki return -ENOMEM; 115772368d12SRafael J. Wysocki } 11580d66b91eSSrivatsa S. Bhat 1159835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 11601da177e4SLinus Torvalds 11611da177e4SLinus Torvalds /* call driver. From then on the cpufreq must be able 11621da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 11631da177e4SLinus Torvalds */ 11641c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy); 11651da177e4SLinus Torvalds if (ret) { 11662d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 11678101f997SViresh Kumar goto out_free_policy; 11681da177e4SLinus Torvalds } 1169643ae6e8SViresh Kumar 11706d4e81edSTomeu Vizoso down_write(&policy->rwsem); 11716d4e81edSTomeu Vizoso 1172194d99c7SRafael J. Wysocki if (new_policy) { 11734d1f3a5bSRafael J. Wysocki /* related_cpus should at least include policy->cpus. */ 11740998a03aSViresh Kumar cpumask_copy(policy->related_cpus, policy->cpus); 11754d1f3a5bSRafael J. Wysocki /* Remember CPUs present at the policy creation time. */ 1176559ed407SRafael J. Wysocki cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); 11774d1f3a5bSRafael J. Wysocki } 1178559ed407SRafael J. Wysocki 11795a7e56a5SViresh Kumar /* 11805a7e56a5SViresh Kumar * affected cpus must always be the one, which are online. We aren't 11815a7e56a5SViresh Kumar * managing offline cpus here. 11825a7e56a5SViresh Kumar */ 11835a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 11845a7e56a5SViresh Kumar 1185194d99c7SRafael J. Wysocki if (new_policy) { 11865a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 11875a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 11886d4e81edSTomeu Vizoso 1189652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1190988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus) 1191652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1192652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1193988bed09SViresh Kumar } 1194652ed95dSViresh Kumar 11952ed99e39SRafael J. 
Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1196da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1197da60ce9fSViresh Kumar if (!policy->cur) { 1198da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 11998101f997SViresh Kumar goto out_exit_policy; 1200da60ce9fSViresh Kumar } 1201da60ce9fSViresh Kumar } 1202da60ce9fSViresh Kumar 1203d3916691SViresh Kumar /* 1204d3916691SViresh Kumar * Sometimes boot loaders set CPU frequency to a value outside of 1205d3916691SViresh Kumar * frequency table present with cpufreq core. In such cases the CPU might be 1206d3916691SViresh Kumar * unstable if it has to run on that frequency for a long duration of time 1207d3916691SViresh Kumar * and so it's better to set it to a frequency which is specified in 1208d3916691SViresh Kumar * freq-table. This also makes cpufreq stats inconsistent as 1209d3916691SViresh Kumar * cpufreq-stats would fail to register because current frequency of CPU 1210d3916691SViresh Kumar * isn't found in freq-table. 1211d3916691SViresh Kumar * 1212d3916691SViresh Kumar * Because we don't want this change to affect the boot process badly, we go 1213d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1214d3916691SViresh Kumar * otherwise we will end up setting freq to lowest of the table as 'cur' 1215d3916691SViresh Kumar * is initialized to zero). 1216d3916691SViresh Kumar * 1217d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1" otherwise 1218d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be 1219d3916691SViresh Kumar * equal to target-freq. 1220d3916691SViresh Kumar */ 1221d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1222d3916691SViresh Kumar && has_target()) { 1223d3916691SViresh Kumar /* Are we running at an unknown frequency? */ 1224d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1225d3916691SViresh Kumar if (ret == -EINVAL) { 1226d3916691SViresh Kumar /* Warn the user and fix it */ 1227d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", 1228d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1229d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1230d3916691SViresh Kumar CPUFREQ_RELATION_L); 1231d3916691SViresh Kumar 1232d3916691SViresh Kumar /* 1233d3916691SViresh Kumar * Reaching here after boot in a few seconds may not 1234d3916691SViresh Kumar * mean that system will remain stable at "unknown" 1235d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON(). 1236d3916691SViresh Kumar */ 1237d3916691SViresh Kumar BUG_ON(ret); 1238d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", 1239d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1240d3916691SViresh Kumar } 1241d3916691SViresh Kumar } 1242d3916691SViresh Kumar 1243a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1244a1531acdSThomas Renninger CPUFREQ_START, policy); 1245a1531acdSThomas Renninger 1246194d99c7SRafael J. Wysocki if (new_policy) { 1247d9612a49SRafael J.
Wysocki ret = cpufreq_add_dev_interface(policy); 124819d6f7ecSDave Jones if (ret) 12498101f997SViresh Kumar goto out_exit_policy; 1250fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1251fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 1252c88a1f8bSLukasz Majewski 1253c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1254c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1255c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1256988bed09SViresh Kumar } 12578ff69732SDave Jones 12587f0fa40fSViresh Kumar ret = cpufreq_init_policy(policy); 12597f0fa40fSViresh Kumar if (ret) { 12607f0fa40fSViresh Kumar pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", 12617f0fa40fSViresh Kumar __func__, cpu, ret); 1262194d99c7SRafael J. Wysocki /* cpufreq_policy_free() will notify based on this */ 1263194d99c7SRafael J. Wysocki new_policy = false; 1264194d99c7SRafael J. Wysocki goto out_exit_policy; 126508fd8c1cSViresh Kumar } 1266e18f1682SSrivatsa S. Bhat 12674e97b631SViresh Kumar up_write(&policy->rwsem); 126808fd8c1cSViresh Kumar 1269038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 12707c45cf31SViresh Kumar 12717c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 12727c45cf31SViresh Kumar if (cpufreq_driver->ready) 12737c45cf31SViresh Kumar cpufreq_driver->ready(policy); 12747c45cf31SViresh Kumar 12752d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 12761da177e4SLinus Torvalds 12771da177e4SLinus Torvalds return 0; 12781da177e4SLinus Torvalds 12798101f997SViresh Kumar out_exit_policy: 12807106e02bSPrarit Bhargava up_write(&policy->rwsem); 12817106e02bSPrarit Bhargava 1282da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1283da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 12848101f997SViresh Kumar out_free_policy: 1285194d99c7SRafael J. Wysocki cpufreq_policy_free(policy, !new_policy); 12861da177e4SLinus Torvalds return ret; 12871da177e4SLinus Torvalds } 12881da177e4SLinus Torvalds 12890b275352SRafael J. Wysocki /** 12900b275352SRafael J. Wysocki * cpufreq_add_dev - the cpufreq interface for a CPU device. 12910b275352SRafael J. Wysocki * @dev: CPU device. 12920b275352SRafael J. Wysocki * @sif: Subsystem interface structure pointer (not used) 12930b275352SRafael J. Wysocki */ 12940b275352SRafael J. Wysocki static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 12950b275352SRafael J. Wysocki { 12960b275352SRafael J. Wysocki unsigned cpu = dev->id; 12970b275352SRafael J. Wysocki int ret; 12980b275352SRafael J. Wysocki 12990b275352SRafael J. Wysocki dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); 13000b275352SRafael J. Wysocki 13010b275352SRafael J. Wysocki if (cpu_online(cpu)) { 13020b275352SRafael J. Wysocki ret = cpufreq_online(cpu); 13030b275352SRafael J. Wysocki } else { 13040b275352SRafael J. Wysocki /* 13050b275352SRafael J. Wysocki * A hotplug notifier will follow and we will handle it as CPU 13060b275352SRafael J. Wysocki * online then. For now, just create the sysfs link, unless 13070b275352SRafael J. Wysocki * there is no policy or the link is already present. 13080b275352SRafael J. Wysocki */ 13090b275352SRafael J. Wysocki struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 13100b275352SRafael J. Wysocki 13110b275352SRafael J. Wysocki ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) 13120b275352SRafael J. Wysocki ? 
add_cpu_dev_symlink(policy, cpu) : 0; 13130b275352SRafael J. Wysocki } 13141da177e4SLinus Torvalds 13151da177e4SLinus Torvalds return ret; 13161da177e4SLinus Torvalds } 13171da177e4SLinus Torvalds 131815c0b4d2SRafael J. Wysocki static void cpufreq_offline_prepare(unsigned int cpu) 13191da177e4SLinus Torvalds { 13203a3e9e06SViresh Kumar struct cpufreq_policy *policy; 13211da177e4SLinus Torvalds 1322b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 13231da177e4SLinus Torvalds 1324988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 13253a3e9e06SViresh Kumar if (!policy) { 1326b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 132715c0b4d2SRafael J. Wysocki return; 13281da177e4SLinus Torvalds } 13291da177e4SLinus Torvalds 13309c0ebcf7SViresh Kumar if (has_target()) { 133115c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1332559ed407SRafael J. Wysocki if (ret) 13333de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 1334db5f2995SViresh Kumar } 13351da177e4SLinus Torvalds 13364573237bSViresh Kumar down_write(&policy->rwsem); 13379591becbSViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 13384573237bSViresh Kumar 13399591becbSViresh Kumar if (policy_is_inactive(policy)) { 13409591becbSViresh Kumar if (has_target()) 13414573237bSViresh Kumar strncpy(policy->last_governor, policy->governor->name, 13424573237bSViresh Kumar CPUFREQ_NAME_LEN); 13439591becbSViresh Kumar } else if (cpu == policy->cpu) { 13449591becbSViresh Kumar /* Nominate new CPU */ 13459591becbSViresh Kumar policy->cpu = cpumask_any(policy->cpus); 13469591becbSViresh Kumar } 13474573237bSViresh Kumar up_write(&policy->rwsem); 13481da177e4SLinus Torvalds 13499591becbSViresh Kumar /* Start governor again for active policy */ 13509591becbSViresh Kumar if (!policy_is_inactive(policy)) { 13519591becbSViresh Kumar if (has_target()) { 135215c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 13539591becbSViresh Kumar if (!ret) 13549591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 135587549141SViresh Kumar 13569591becbSViresh Kumar if (ret) 13579591becbSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 13589591becbSViresh Kumar } 13599591becbSViresh Kumar } else if (cpufreq_driver->stop_cpu) { 1360367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 13619591becbSViresh Kumar } 1362cedb70afSSrivatsa S. Bhat } 1363cedb70afSSrivatsa S. Bhat 136415c0b4d2SRafael J. Wysocki static void cpufreq_offline_finish(unsigned int cpu) 1365cedb70afSSrivatsa S. Bhat { 13669591becbSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1367cedb70afSSrivatsa S. Bhat 1368cedb70afSSrivatsa S. Bhat if (!policy) { 1369cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 137015c0b4d2SRafael J. Wysocki return; 1371cedb70afSSrivatsa S. Bhat } 1372cedb70afSSrivatsa S. Bhat 13739591becbSViresh Kumar /* Only proceed for inactive policies */ 13749591becbSViresh Kumar if (!policy_is_inactive(policy)) 137515c0b4d2SRafael J. Wysocki return; 137687549141SViresh Kumar 137787549141SViresh Kumar /* If cpu is last user of policy, free policy */ 137887549141SViresh Kumar if (has_target()) { 137915c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1380559ed407SRafael J. Wysocki if (ret) 138187549141SViresh Kumar pr_err("%s: Failed to exit governor\n", __func__); 13823de9bdebSViresh Kumar } 13832a998599SRafael J. 
Wysocki 13848414809cSSrivatsa S. Bhat /* 13858414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 13868414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 13878414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 13888414809cSSrivatsa S. Bhat */ 138955582bccSSrinivas Pandruvada if (cpufreq_driver->exit) { 13903a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 139155582bccSSrinivas Pandruvada policy->freq_table = NULL; 139255582bccSSrinivas Pandruvada } 13931da177e4SLinus Torvalds } 13941da177e4SLinus Torvalds 1395cedb70afSSrivatsa S. Bhat /** 139627a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1397cedb70afSSrivatsa S. Bhat * 1398cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1399cedb70afSSrivatsa S. Bhat */ 140071db87baSViresh Kumar static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 14015a01f2e8SVenkatesh Pallipadi { 14028a25a2fdSKay Sievers unsigned int cpu = dev->id; 140387549141SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 140487549141SViresh Kumar 140587549141SViresh Kumar if (!policy) 140671db87baSViresh Kumar return; 1407ec28297aSVenki Pallipadi 1408559ed407SRafael J. Wysocki if (cpu_online(cpu)) { 140915c0b4d2SRafael J. Wysocki cpufreq_offline_prepare(cpu); 141015c0b4d2SRafael J. Wysocki cpufreq_offline_finish(cpu); 141187549141SViresh Kumar } 141287549141SViresh Kumar 1413559ed407SRafael J. Wysocki cpumask_clear_cpu(cpu, policy->real_cpus); 1414559ed407SRafael J. Wysocki 1415559ed407SRafael J. Wysocki if (cpumask_empty(policy->real_cpus)) { 14163654c5ccSViresh Kumar cpufreq_policy_free(policy, true); 141771db87baSViresh Kumar return; 141887549141SViresh Kumar } 141987549141SViresh Kumar 1420559ed407SRafael J. Wysocki if (cpu != policy->kobj_cpu) { 1421559ed407SRafael J. Wysocki remove_cpu_dev_symlink(policy, cpu); 1422559ed407SRafael J. Wysocki } else { 1423559ed407SRafael J. Wysocki /* 1424559ed407SRafael J. Wysocki * The CPU owning the policy object is going away. Move it to 1425559ed407SRafael J. Wysocki * another suitable CPU. 1426559ed407SRafael J. Wysocki */ 1427559ed407SRafael J. Wysocki unsigned int new_cpu = cpumask_first(policy->real_cpus); 1428559ed407SRafael J. Wysocki struct device *new_dev = get_cpu_device(new_cpu); 142927a862e9SViresh Kumar 1430559ed407SRafael J. Wysocki dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu); 143127a862e9SViresh Kumar 1432559ed407SRafael J. Wysocki sysfs_remove_link(&new_dev->kobj, "cpufreq"); 1433559ed407SRafael J. Wysocki policy->kobj_cpu = new_cpu; 1434559ed407SRafael J. Wysocki WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); 1435559ed407SRafael J. Wysocki } 14365a01f2e8SVenkatesh Pallipadi } 14375a01f2e8SVenkatesh Pallipadi 143865f27f38SDavid Howells static void handle_update(struct work_struct *work) 14391da177e4SLinus Torvalds { 144065f27f38SDavid Howells struct cpufreq_policy *policy = 144165f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 144265f27f38SDavid Howells unsigned int cpu = policy->cpu; 14432d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 14441da177e4SLinus Torvalds cpufreq_update_policy(cpu); 14451da177e4SLinus Torvalds } 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds /** 1448bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1449bb176f7dSViresh Kumar * in deep trouble. 
1450a1e1dc41SViresh Kumar * @policy: policy managing CPUs 14511da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 14521da177e4SLinus Torvalds * 145329464f28SDave Jones * We adjust to the current frequency first, and need to clean up later. 145429464f28SDave Jones * So either call cpufreq_update_policy() or schedule handle_update(). 14551da177e4SLinus Torvalds */ 1456a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1457e08f5f5bSGautham R Shenoy unsigned int new_freq) 14581da177e4SLinus Torvalds { 14591da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1460b43a7ffbSViresh Kumar 1461e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1462a1e1dc41SViresh Kumar policy->cur, new_freq); 14631da177e4SLinus Torvalds 1464a1e1dc41SViresh Kumar freqs.old = policy->cur; 14651da177e4SLinus Torvalds freqs.new = new_freq; 1466b43a7ffbSViresh Kumar 14678fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 14688fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 14691da177e4SLinus Torvalds } 14701da177e4SLinus Torvalds 14711da177e4SLinus Torvalds /** 14724ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 147395235ca2SVenkatesh Pallipadi * @cpu: CPU number 147495235ca2SVenkatesh Pallipadi * 147595235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 147695235ca2SVenkatesh Pallipadi * Return value will be the same as what is shown in scaling_cur_freq in sysfs. 147795235ca2SVenkatesh Pallipadi */ 147895235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 147995235ca2SVenkatesh Pallipadi { 14809e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1481e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 148295235ca2SVenkatesh Pallipadi 14831c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 14841c3d85ddSRafael J. Wysocki return cpufreq_driver->get(cpu); 14859e21ba8bSDirk Brandewie 14869e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 148795235ca2SVenkatesh Pallipadi if (policy) { 1488e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 148995235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 149095235ca2SVenkatesh Pallipadi } 149195235ca2SVenkatesh Pallipadi 14924d34a67dSDave Jones return ret_freq; 149395235ca2SVenkatesh Pallipadi } 149495235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 149595235ca2SVenkatesh Pallipadi 14963d737108SJesse Barnes /** 14973d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 14983d737108SJesse Barnes * @cpu: CPU number 14993d737108SJesse Barnes * 15003d737108SJesse Barnes * Just return the max possible frequency for a given CPU.
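 * Returns zero if no policy is found for @cpu.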
15013d737108SJesse Barnes */ 15023d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 15033d737108SJesse Barnes { 15043d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15053d737108SJesse Barnes unsigned int ret_freq = 0; 15063d737108SJesse Barnes 15073d737108SJesse Barnes if (policy) { 15083d737108SJesse Barnes ret_freq = policy->max; 15093d737108SJesse Barnes cpufreq_cpu_put(policy); 15103d737108SJesse Barnes } 15113d737108SJesse Barnes 15123d737108SJesse Barnes return ret_freq; 15133d737108SJesse Barnes } 15143d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 15153d737108SJesse Barnes 1516d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 15171da177e4SLinus Torvalds { 1518e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 15191da177e4SLinus Torvalds 15201c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 15214d34a67dSDave Jones return ret_freq; 15221da177e4SLinus Torvalds 1523d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 15241da177e4SLinus Torvalds 152511e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 152611e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) 152711e584cfSViresh Kumar return ret_freq; 152811e584cfSViresh Kumar 1529e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 15301c3d85ddSRafael J. Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1531e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1532e08f5f5bSGautham R Shenoy saved value exists */ 1533e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1534a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 15351da177e4SLinus Torvalds schedule_work(&policy->update); 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds } 15381da177e4SLinus Torvalds 15394d34a67dSDave Jones return ret_freq; 15405a01f2e8SVenkatesh Pallipadi } 15411da177e4SLinus Torvalds 15425a01f2e8SVenkatesh Pallipadi /** 15435a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 15445a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 15455a01f2e8SVenkatesh Pallipadi * 15465a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 15475a01f2e8SVenkatesh Pallipadi */ 15485a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 15495a01f2e8SVenkatesh Pallipadi { 1550999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15515a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 15525a01f2e8SVenkatesh Pallipadi 1553999976e0SAaron Plattner if (policy) { 1554ad7722daSviresh kumar down_read(&policy->rwsem); 1555d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1556ad7722daSviresh kumar up_read(&policy->rwsem); 1557999976e0SAaron Plattner 1558999976e0SAaron Plattner cpufreq_cpu_put(policy); 1559999976e0SAaron Plattner } 15606eed9404SViresh Kumar 15614d34a67dSDave Jones return ret_freq; 15621da177e4SLinus Torvalds } 15631da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 15641da177e4SLinus Torvalds 15658a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 15668a25a2fdSKay Sievers .name = "cpufreq", 15678a25a2fdSKay Sievers .subsys = &cpu_subsys, 15688a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 15698a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1570e00e56dfSRafael J. Wysocki }; 1571e00e56dfSRafael J. 
Wysocki 1572e28867eaSViresh Kumar /* 1573e28867eaSViresh Kumar * In case the platform wants some specific frequency to be configured 1574e28867eaSViresh Kumar * during suspend. 157542d4dc3fSBenjamin Herrenschmidt */ 1576e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 157742d4dc3fSBenjamin Herrenschmidt { 1578e28867eaSViresh Kumar int ret; 15794bc5d341SDave Jones 1580e28867eaSViresh Kumar if (!policy->suspend_freq) { 1581201f3716SBartlomiej Zolnierkiewicz pr_debug("%s: suspend_freq not defined\n", __func__); 1582201f3716SBartlomiej Zolnierkiewicz return 0; 158342d4dc3fSBenjamin Herrenschmidt } 158442d4dc3fSBenjamin Herrenschmidt 1585e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1586e28867eaSViresh Kumar policy->suspend_freq); 1587e28867eaSViresh Kumar 1588e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1589e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1590e28867eaSViresh Kumar if (ret) 1591e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. err: %d\n", 1592e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1593e28867eaSViresh Kumar 1594c9060494SDave Jones return ret; 159542d4dc3fSBenjamin Herrenschmidt } 1596e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 159742d4dc3fSBenjamin Herrenschmidt 159842d4dc3fSBenjamin Herrenschmidt /** 15992f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 16001da177e4SLinus Torvalds * 16012f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors, 16022f0aea93SViresh Kumar * as some platforms can't change frequency after this point in the suspend cycle. 16032f0aea93SViresh Kumar * This is because some of the devices (like i2c, regulators, etc.) used for 16042f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 16051da177e4SLinus Torvalds */ 16062f0aea93SViresh Kumar void cpufreq_suspend(void) 16071da177e4SLinus Torvalds { 16083a3e9e06SViresh Kumar struct cpufreq_policy *policy; 16091da177e4SLinus Torvalds 16102f0aea93SViresh Kumar if (!cpufreq_driver) 1611e00e56dfSRafael J. Wysocki return; 16121da177e4SLinus Torvalds 16132f0aea93SViresh Kumar if (!has_target()) 1614b1b12babSViresh Kumar goto suspend; 16151da177e4SLinus Torvalds 16162f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 16172f0aea93SViresh Kumar 1618f963735aSViresh Kumar for_each_active_policy(policy) { 16192f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 16202f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 16212f0aea93SViresh Kumar __func__, policy); 16222f0aea93SViresh Kumar else if (cpufreq_driver->suspend 16232f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 16242f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 16252f0aea93SViresh Kumar policy); 16261da177e4SLinus Torvalds } 1627b1b12babSViresh Kumar 1628b1b12babSViresh Kumar suspend: 1629b1b12babSViresh Kumar cpufreq_suspended = true; 16301da177e4SLinus Torvalds } 16311da177e4SLinus Torvalds 16321da177e4SLinus Torvalds /** 16332f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 16341da177e4SLinus Torvalds * 16352f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 16362f0aea93SViresh Kumar * are suspended with cpufreq_suspend().
16371da177e4SLinus Torvalds */ 16382f0aea93SViresh Kumar void cpufreq_resume(void) 16391da177e4SLinus Torvalds { 16401da177e4SLinus Torvalds struct cpufreq_policy *policy; 16411da177e4SLinus Torvalds 16422f0aea93SViresh Kumar if (!cpufreq_driver) 16431da177e4SLinus Torvalds return; 16441da177e4SLinus Torvalds 16458e30444eSLan Tianyu cpufreq_suspended = false; 16468e30444eSLan Tianyu 16472f0aea93SViresh Kumar if (!has_target()) 16482f0aea93SViresh Kumar return; 16491da177e4SLinus Torvalds 16502f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 16512f0aea93SViresh Kumar 1652f963735aSViresh Kumar for_each_active_policy(policy) { 16530c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 16540c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 16550c5aa405SViresh Kumar policy); 16560c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 16572f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 16582f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 16592f0aea93SViresh Kumar __func__, policy); 1660c75de0acSViresh Kumar } 16612f0aea93SViresh Kumar 16622f0aea93SViresh Kumar /* 1663c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1664c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1665c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 16662f0aea93SViresh Kumar */ 1667c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1668c75de0acSViresh Kumar if (WARN_ON(!policy)) 1669c75de0acSViresh Kumar return; 1670c75de0acSViresh Kumar 16713a3e9e06SViresh Kumar schedule_work(&policy->update); 16721da177e4SLinus Torvalds } 16731da177e4SLinus Torvalds 16749d95046eSBorislav Petkov /** 16759d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 16769d95046eSBorislav Petkov * 16779d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 16789d95046eSBorislav Petkov * or NULL, if none. 16799d95046eSBorislav Petkov */ 16809d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 16819d95046eSBorislav Petkov { 16821c3d85ddSRafael J. Wysocki if (cpufreq_driver) 16831c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 16841c3d85ddSRafael J. Wysocki 16851c3d85ddSRafael J. Wysocki return NULL; 16869d95046eSBorislav Petkov } 16879d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 16881da177e4SLinus Torvalds 168951315cdfSThomas Petazzoni /** 169051315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 169151315cdfSThomas Petazzoni * 169251315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 169351315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 
169451315cdfSThomas Petazzoni */ 169551315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 169651315cdfSThomas Petazzoni { 169751315cdfSThomas Petazzoni if (cpufreq_driver) 169851315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 169951315cdfSThomas Petazzoni 170051315cdfSThomas Petazzoni return NULL; 170151315cdfSThomas Petazzoni } 170251315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 170351315cdfSThomas Petazzoni 17041da177e4SLinus Torvalds /********************************************************************* 17051da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 17061da177e4SLinus Torvalds *********************************************************************/ 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds /** 17091da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 17101da177e4SLinus Torvalds * @nb: notifier function to register 17111da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 17121da177e4SLinus Torvalds * 17131da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 17141da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 17151da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 17161da177e4SLinus Torvalds * changes in cpufreq policy. 17171da177e4SLinus Torvalds * 17181da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1719e041c683SAlan Stern * blocking_notifier_chain_register. 17201da177e4SLinus Torvalds */ 17211da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 17221da177e4SLinus Torvalds { 17231da177e4SLinus Torvalds int ret; 17241da177e4SLinus Torvalds 1725d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1726d5aaffa9SDirk Brandewie return -EINVAL; 1727d5aaffa9SDirk Brandewie 172874212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 172974212ca4SCesar Eduardo Barros 17301da177e4SLinus Torvalds switch (list) { 17311da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1732b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1733e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 17341da177e4SLinus Torvalds break; 17351da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1736e041c683SAlan Stern ret = blocking_notifier_chain_register( 1737e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 17381da177e4SLinus Torvalds break; 17391da177e4SLinus Torvalds default: 17401da177e4SLinus Torvalds ret = -EINVAL; 17411da177e4SLinus Torvalds } 17421da177e4SLinus Torvalds 17431da177e4SLinus Torvalds return ret; 17441da177e4SLinus Torvalds } 17451da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 17461da177e4SLinus Torvalds 17471da177e4SLinus Torvalds /** 17481da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 17491da177e4SLinus Torvalds * @nb: notifier block to be unregistered 17501da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 17511da177e4SLinus Torvalds * 17521da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 17531da177e4SLinus Torvalds * 17541da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1755e041c683SAlan Stern * blocking_notifier_chain_unregister. 
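 *
 * A minimal usage sketch (illustrative only; "my_nb" and my_callback are
 * placeholder names, not code from this file):
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_callback };
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	...
 *	cpufreq_unregister_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);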
17561da177e4SLinus Torvalds */ 17571da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 17581da177e4SLinus Torvalds { 17591da177e4SLinus Torvalds int ret; 17601da177e4SLinus Torvalds 1761d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1762d5aaffa9SDirk Brandewie return -EINVAL; 1763d5aaffa9SDirk Brandewie 17641da177e4SLinus Torvalds switch (list) { 17651da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1766b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1767e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 17681da177e4SLinus Torvalds break; 17691da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1770e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1771e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 17721da177e4SLinus Torvalds break; 17731da177e4SLinus Torvalds default: 17741da177e4SLinus Torvalds ret = -EINVAL; 17751da177e4SLinus Torvalds } 17761da177e4SLinus Torvalds 17771da177e4SLinus Torvalds return ret; 17781da177e4SLinus Torvalds } 17791da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 17801da177e4SLinus Torvalds 17811da177e4SLinus Torvalds 17821da177e4SLinus Torvalds /********************************************************************* 17831da177e4SLinus Torvalds * GOVERNORS * 17841da177e4SLinus Torvalds *********************************************************************/ 17851da177e4SLinus Torvalds 17861c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 17871c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 17881c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 17891c03a2d0SViresh Kumar { 17901c03a2d0SViresh Kumar int ret; 17911c03a2d0SViresh Kumar 17921c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 17931c03a2d0SViresh Kumar 17941c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 17951c03a2d0SViresh Kumar if (!freqs->new) 17961c03a2d0SViresh Kumar return 0; 17971c03a2d0SViresh Kumar 17981c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 17991c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 18001c03a2d0SViresh Kumar 18011c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 18021c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 18031c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 18041c03a2d0SViresh Kumar 18051c03a2d0SViresh Kumar if (ret) 18061c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 18071c03a2d0SViresh Kumar __func__, ret); 18081c03a2d0SViresh Kumar 18091c03a2d0SViresh Kumar return ret; 18101c03a2d0SViresh Kumar } 18111c03a2d0SViresh Kumar 18128d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 18138d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 18148d65775dSViresh Kumar { 18151c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 18161c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 18178d65775dSViresh Kumar int retval = -EINVAL; 18188d65775dSViresh Kumar bool notify; 18198d65775dSViresh Kumar 18208d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 18218d65775dSViresh Kumar if (notify) { 18221c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 18231c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 
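/*
 * Switch to the driver's intermediate frequency first; if one is
 * actually used, it becomes freqs.old for the final transition below.
 */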
18241c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 18251c03a2d0SViresh Kumar if (retval) 18261c03a2d0SViresh Kumar return retval; 18278d65775dSViresh Kumar 18281c03a2d0SViresh Kumar intermediate_freq = freqs.new; 18291c03a2d0SViresh Kumar /* Set old freq to intermediate */ 18301c03a2d0SViresh Kumar if (intermediate_freq) 18311c03a2d0SViresh Kumar freqs.old = freqs.new; 18321c03a2d0SViresh Kumar } 18331c03a2d0SViresh Kumar 18341c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 18358d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 18368d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 18378d65775dSViresh Kumar 18388d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 18398d65775dSViresh Kumar } 18408d65775dSViresh Kumar 18418d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 18428d65775dSViresh Kumar if (retval) 18438d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 18448d65775dSViresh Kumar retval); 18458d65775dSViresh Kumar 18461c03a2d0SViresh Kumar if (notify) { 18478d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 18488d65775dSViresh Kumar 18491c03a2d0SViresh Kumar /* 18501c03a2d0SViresh Kumar * Failed after setting to intermediate freq? Driver should have 18511c03a2d0SViresh Kumar * reverted back to initial frequency and so should we. Check 18521c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 185358405af6SShailendra Verma * case we haven't switched to intermediate freq at all. 18541c03a2d0SViresh Kumar */ 18551c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 18561c03a2d0SViresh Kumar freqs.old = intermediate_freq; 18571c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 18581c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 18591c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 18601c03a2d0SViresh Kumar } 18611c03a2d0SViresh Kumar } 18621c03a2d0SViresh Kumar 18638d65775dSViresh Kumar return retval; 18648d65775dSViresh Kumar } 18658d65775dSViresh Kumar 18661da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 18671da177e4SLinus Torvalds unsigned int target_freq, 18681da177e4SLinus Torvalds unsigned int relation) 18691da177e4SLinus Torvalds { 18707249924eSViresh Kumar unsigned int old_target_freq = target_freq; 18718d65775dSViresh Kumar int retval = -EINVAL; 1872c32b6b8eSAshok Raj 1873a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 1874a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 1875a7b422cdSKonrad Rzeszutek Wilk 18767249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 18777249924eSViresh Kumar if (target_freq > policy->max) 18787249924eSViresh Kumar target_freq = policy->max; 18797249924eSViresh Kumar if (target_freq < policy->min) 18807249924eSViresh Kumar target_freq = policy->min; 18817249924eSViresh Kumar 18827249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 18837249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 18845a1c0228SViresh Kumar 18859c0ebcf7SViresh Kumar /* 18869c0ebcf7SViresh Kumar * This might look like a redundant call as we are checking it again 18879c0ebcf7SViresh Kumar * after finding index. But it is left intentionally for cases where 18889c0ebcf7SViresh Kumar * exactly same freq is called again and so we can save on few function 18899c0ebcf7SViresh Kumar * calls. 
18909c0ebcf7SViresh Kumar */ 18915a1c0228SViresh Kumar if (target_freq == policy->cur) 18925a1c0228SViresh Kumar return 0; 18935a1c0228SViresh Kumar 18941c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 18951c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 18961c03a2d0SViresh Kumar 18971c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 18981c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 18999c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 19009c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 19019c0ebcf7SViresh Kumar int index; 190290d45d17SAshok Raj 19039c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 19049c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 19059c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 19069c0ebcf7SViresh Kumar goto out; 19079c0ebcf7SViresh Kumar } 19089c0ebcf7SViresh Kumar 19099c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 19109c0ebcf7SViresh Kumar target_freq, relation, &index); 19119c0ebcf7SViresh Kumar if (unlikely(retval)) { 19129c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 19139c0ebcf7SViresh Kumar goto out; 19149c0ebcf7SViresh Kumar } 19159c0ebcf7SViresh Kumar 1916d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 19179c0ebcf7SViresh Kumar retval = 0; 1918d4019f0aSViresh Kumar goto out; 1919d4019f0aSViresh Kumar } 1920d4019f0aSViresh Kumar 19218d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 19229c0ebcf7SViresh Kumar } 19239c0ebcf7SViresh Kumar 19249c0ebcf7SViresh Kumar out: 19251da177e4SLinus Torvalds return retval; 19261da177e4SLinus Torvalds } 19271da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 19281da177e4SLinus Torvalds 19291da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 19301da177e4SLinus Torvalds unsigned int target_freq, 19311da177e4SLinus Torvalds unsigned int relation) 19321da177e4SLinus Torvalds { 1933f1829e4aSJulia Lawall int ret = -EINVAL; 19341da177e4SLinus Torvalds 1935ad7722daSviresh kumar down_write(&policy->rwsem); 19361da177e4SLinus Torvalds 19371da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 19381da177e4SLinus Torvalds 1939ad7722daSviresh kumar up_write(&policy->rwsem); 19401da177e4SLinus Torvalds 19411da177e4SLinus Torvalds return ret; 19421da177e4SLinus Torvalds } 19431da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 19441da177e4SLinus Torvalds 1945e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 1946e08f5f5bSGautham R Shenoy unsigned int event) 19471da177e4SLinus Torvalds { 1948cc993cabSDave Jones int ret; 19496afde10cSThomas Renninger 19506afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 19516afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 
19526afde10cSThomas Renninger That this is the case is already ensured in Kconfig 19536afde10cSThomas Renninger */ 19546afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 19556afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 19566afde10cSThomas Renninger #else 19576afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 19586afde10cSThomas Renninger #endif 19591c256245SThomas Renninger 19602f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 19612f0aea93SViresh Kumar if (cpufreq_suspended) 19622f0aea93SViresh Kumar return 0; 1963cb57720bSEthan Zhao /* 1964cb57720bSEthan Zhao * Governor might not be initiated here if ACPI _PPC changed 1965cb57720bSEthan Zhao * notification happened, so check it. 1966cb57720bSEthan Zhao */ 1967cb57720bSEthan Zhao if (!policy->governor) 1968cb57720bSEthan Zhao return -EINVAL; 19692f0aea93SViresh Kumar 19701c256245SThomas Renninger if (policy->governor->max_transition_latency && 19711c256245SThomas Renninger policy->cpuinfo.transition_latency > 19721c256245SThomas Renninger policy->governor->max_transition_latency) { 19736afde10cSThomas Renninger if (!gov) 19746afde10cSThomas Renninger return -EINVAL; 19756afde10cSThomas Renninger else { 1976e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 1977e837f9b5SJoe Perches policy->governor->name, gov->name); 19781c256245SThomas Renninger policy->governor = gov; 19791c256245SThomas Renninger } 19806afde10cSThomas Renninger } 19811da177e4SLinus Torvalds 1982fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 19831da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 19841da177e4SLinus Torvalds return -EINVAL; 19851da177e4SLinus Torvalds 198663431f78SViresh Kumar pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event); 198795731ebbSXiaoguang Chen 198895731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 198956d07db2SSrivatsa S. 
Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 1990f73d3933SViresh Kumar || (!policy->governor_enabled 1991f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 199295731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 199395731ebbSXiaoguang Chen return -EBUSY; 199495731ebbSXiaoguang Chen } 199595731ebbSXiaoguang Chen 199695731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 199795731ebbSXiaoguang Chen policy->governor_enabled = false; 199895731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 199995731ebbSXiaoguang Chen policy->governor_enabled = true; 200095731ebbSXiaoguang Chen 200195731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 200295731ebbSXiaoguang Chen 20031da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 20041da177e4SLinus Torvalds 20054d5dcc42SViresh Kumar if (!ret) { 20064d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20078e53695fSViresh Kumar policy->governor->initialized++; 20084d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 20098e53695fSViresh Kumar policy->governor->initialized--; 201095731ebbSXiaoguang Chen } else { 201195731ebbSXiaoguang Chen /* Restore original values */ 201295731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 201395731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 201495731ebbSXiaoguang Chen policy->governor_enabled = true; 201595731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 201695731ebbSXiaoguang Chen policy->governor_enabled = false; 201795731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 20184d5dcc42SViresh Kumar } 2019b394058fSViresh Kumar 2020fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2021fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 20221da177e4SLinus Torvalds module_put(policy->governor->owner); 20231da177e4SLinus Torvalds 20241da177e4SLinus Torvalds return ret; 20251da177e4SLinus Torvalds } 20261da177e4SLinus Torvalds 20271da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 20281da177e4SLinus Torvalds { 20293bcb09a3SJeremy Fitzhardinge int err; 20301da177e4SLinus Torvalds 20311da177e4SLinus Torvalds if (!governor) 20321da177e4SLinus Torvalds return -EINVAL; 20331da177e4SLinus Torvalds 2034a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2035a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2036a7b422cdSKonrad Rzeszutek Wilk 20373fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 20381da177e4SLinus Torvalds 2039b394058fSViresh Kumar governor->initialized = 0; 20403bcb09a3SJeremy Fitzhardinge err = -EBUSY; 204142f91fa1SViresh Kumar if (!find_governor(governor->name)) { 20423bcb09a3SJeremy Fitzhardinge err = 0; 20431da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 20443bcb09a3SJeremy Fitzhardinge } 20451da177e4SLinus Torvalds 20463fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 20473bcb09a3SJeremy Fitzhardinge return err; 20481da177e4SLinus Torvalds } 20491da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 20501da177e4SLinus Torvalds 20511da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 20521da177e4SLinus Torvalds { 20534573237bSViresh Kumar struct cpufreq_policy *policy; 20544573237bSViresh Kumar unsigned long flags; 205590e41bacSPrarit Bhargava 20561da177e4SLinus Torvalds if (!governor) 20571da177e4SLinus Torvalds return; 20581da177e4SLinus Torvalds 2059a7b422cdSKonrad Rzeszutek 
Wilk if (cpufreq_disabled()) 2060a7b422cdSKonrad Rzeszutek Wilk return; 2061a7b422cdSKonrad Rzeszutek Wilk 20624573237bSViresh Kumar /* clear last_governor for all inactive policies */ 20634573237bSViresh Kumar read_lock_irqsave(&cpufreq_driver_lock, flags); 20644573237bSViresh Kumar for_each_inactive_policy(policy) { 206518bf3a12SViresh Kumar if (!strcmp(policy->last_governor, governor->name)) { 206618bf3a12SViresh Kumar policy->governor = NULL; 20674573237bSViresh Kumar strcpy(policy->last_governor, "\0"); 206890e41bacSPrarit Bhargava } 206918bf3a12SViresh Kumar } 20704573237bSViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 207190e41bacSPrarit Bhargava 20723fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 20731da177e4SLinus Torvalds list_del(&governor->governor_list); 20743fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 20751da177e4SLinus Torvalds return; 20761da177e4SLinus Torvalds } 20771da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 20781da177e4SLinus Torvalds 20791da177e4SLinus Torvalds 20801da177e4SLinus Torvalds /********************************************************************* 20811da177e4SLinus Torvalds * POLICY INTERFACE * 20821da177e4SLinus Torvalds *********************************************************************/ 20831da177e4SLinus Torvalds 20841da177e4SLinus Torvalds /** 20851da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 208629464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 208729464f28SDave Jones * is written 20881da177e4SLinus Torvalds * 20891da177e4SLinus Torvalds * Reads the current cpufreq policy. 20901da177e4SLinus Torvalds */ 20911da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 20921da177e4SLinus Torvalds { 20931da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 20941da177e4SLinus Torvalds if (!policy) 20951da177e4SLinus Torvalds return -EINVAL; 20961da177e4SLinus Torvalds 20971da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 20981da177e4SLinus Torvalds if (!cpu_policy) 20991da177e4SLinus Torvalds return -EINVAL; 21001da177e4SLinus Torvalds 2101d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 21021da177e4SLinus Torvalds 21031da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 21041da177e4SLinus Torvalds return 0; 21051da177e4SLinus Torvalds } 21061da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 21071da177e4SLinus Torvalds 2108153d7f3fSArjan van de Ven /* 2109037ce839SViresh Kumar * policy : current policy. 2110037ce839SViresh Kumar * new_policy: policy to be set. 2111153d7f3fSArjan van de Ven */ 2112037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 21133a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 21141da177e4SLinus Torvalds { 2115d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2116d9a789c7SRafael J. Wysocki int ret; 21171da177e4SLinus Torvalds 2118e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2119e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 21201da177e4SLinus Torvalds 2121d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 21221da177e4SLinus Torvalds 2123fba9573bSPan Xinhui /* 2124fba9573bSPan Xinhui * This check works well when we store new min/max freq attributes, 2125fba9573bSPan Xinhui * because new_policy is a copy of policy with one field updated. 
2126fba9573bSPan Xinhui */ 2127fba9573bSPan Xinhui if (new_policy->min > new_policy->max) 2128d9a789c7SRafael J. Wysocki return -EINVAL; 21299c9a43edSMattia Dongili 21301da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 21313a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 21321da177e4SLinus Torvalds if (ret) 2133d9a789c7SRafael J. Wysocki return ret; 21341da177e4SLinus Torvalds 21351da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2136e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 21373a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 21381da177e4SLinus Torvalds 2139bb176f7dSViresh Kumar /* 2140bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2141bb176f7dSViresh Kumar * different to the first one 2142bb176f7dSViresh Kumar */ 21433a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2144e041c683SAlan Stern if (ret) 2145d9a789c7SRafael J. Wysocki return ret; 21461da177e4SLinus Torvalds 21471da177e4SLinus Torvalds /* notification of the new policy */ 2148e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 21493a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 21501da177e4SLinus Torvalds 21513a3e9e06SViresh Kumar policy->min = new_policy->min; 21523a3e9e06SViresh Kumar policy->max = new_policy->max; 21531da177e4SLinus Torvalds 21542d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 21553a3e9e06SViresh Kumar policy->min, policy->max); 21561da177e4SLinus Torvalds 21571c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 21583a3e9e06SViresh Kumar policy->policy = new_policy->policy; 21592d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2160d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2161d9a789c7SRafael J. Wysocki } 2162d9a789c7SRafael J. Wysocki 2163d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2164d9a789c7SRafael J. Wysocki goto out; 21651da177e4SLinus Torvalds 21662d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 21671da177e4SLinus Torvalds 2168d9a789c7SRafael J. Wysocki /* save old, working values */ 2169d9a789c7SRafael J. Wysocki old_gov = policy->governor; 21701da177e4SLinus Torvalds /* end old governor */ 2171d9a789c7SRafael J. 
Wysocki if (old_gov) { 21724bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 21734bc384aeSViresh Kumar if (ret) { 21744bc384aeSViresh Kumar /* This can happen due to race with other operations */ 21754bc384aeSViresh Kumar pr_debug("%s: Failed to Stop Governor: %s (%d)\n", 21764bc384aeSViresh Kumar __func__, old_gov->name, ret); 21774bc384aeSViresh Kumar return ret; 21784bc384aeSViresh Kumar } 21794bc384aeSViresh Kumar 2180ad7722daSviresh kumar up_write(&policy->rwsem); 21814bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2182ad7722daSviresh kumar down_write(&policy->rwsem); 21834bc384aeSViresh Kumar 21844bc384aeSViresh Kumar if (ret) { 21854bc384aeSViresh Kumar pr_err("%s: Failed to Exit Governor: %s (%d)\n", 21864bc384aeSViresh Kumar __func__, old_gov->name, ret); 21874bc384aeSViresh Kumar return ret; 21884bc384aeSViresh Kumar } 21897bd353a9SViresh Kumar } 21901da177e4SLinus Torvalds 21911da177e4SLinus Torvalds /* start new governor */ 21923a3e9e06SViresh Kumar policy->governor = new_policy->governor; 21934bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 21944bc384aeSViresh Kumar if (!ret) { 21954bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 21964bc384aeSViresh Kumar if (!ret) 2197d9a789c7SRafael J. Wysocki goto out; 2198d9a789c7SRafael J. Wysocki 2199ad7722daSviresh kumar up_write(&policy->rwsem); 2200d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2201ad7722daSviresh kumar down_write(&policy->rwsem); 2202955ef483SViresh Kumar } 22037bd353a9SViresh Kumar 22041da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2205d9a789c7SRafael J. Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 22061da177e4SLinus Torvalds if (old_gov) { 22073a3e9e06SViresh Kumar policy->governor = old_gov; 22084bc384aeSViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) 22094bc384aeSViresh Kumar policy->governor = NULL; 22104bc384aeSViresh Kumar else 2211d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 22121da177e4SLinus Torvalds } 22131da177e4SLinus Torvalds 22144bc384aeSViresh Kumar return ret; 2215d9a789c7SRafael J. Wysocki 2216d9a789c7SRafael J. Wysocki out: 2217d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2218d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 22191da177e4SLinus Torvalds } 22201da177e4SLinus Torvalds 22211da177e4SLinus Torvalds /** 22221da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 22231da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 22241da177e4SLinus Torvalds * 222525985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 22261da177e4SLinus Torvalds * at different times. 
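 * Returns -ENODEV if no policy exists for @cpu.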
22271da177e4SLinus Torvalds */ 22281da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 22291da177e4SLinus Torvalds { 22303a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 22313a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2232f1829e4aSJulia Lawall int ret; 22331da177e4SLinus Torvalds 2234fefa8ff8SAaron Plattner if (!policy) 2235fefa8ff8SAaron Plattner return -ENODEV; 22361da177e4SLinus Torvalds 2237ad7722daSviresh kumar down_write(&policy->rwsem); 22381da177e4SLinus Torvalds 22392d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2240d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 22413a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 22423a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 22431da177e4SLinus Torvalds 2244bb176f7dSViresh Kumar /* 2245bb176f7dSViresh Kumar * BIOS might change freq behind our back 2246bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2247bb176f7dSViresh Kumar */ 22482ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 22493a3e9e06SViresh Kumar new_policy.cur = cpufreq_driver->get(cpu); 2250bd0fa9bbSViresh Kumar if (WARN_ON(!new_policy.cur)) { 2251bd0fa9bbSViresh Kumar ret = -EIO; 2252fefa8ff8SAaron Plattner goto unlock; 2253bd0fa9bbSViresh Kumar } 2254bd0fa9bbSViresh Kumar 22553a3e9e06SViresh Kumar if (!policy->cur) { 2256e837f9b5SJoe Perches pr_debug("Driver did not initialize current freq\n"); 22573a3e9e06SViresh Kumar policy->cur = new_policy.cur; 2258a85f7bd3SThomas Renninger } else { 22599c0ebcf7SViresh Kumar if (policy->cur != new_policy.cur && has_target()) 2260a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, new_policy.cur); 22610961dd0dSThomas Renninger } 2262a85f7bd3SThomas Renninger } 22630961dd0dSThomas Renninger 2264037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 22651da177e4SLinus Torvalds 2266fefa8ff8SAaron Plattner unlock: 2267ad7722daSviresh kumar up_write(&policy->rwsem); 22685a01f2e8SVenkatesh Pallipadi 22693a3e9e06SViresh Kumar cpufreq_cpu_put(policy); 22701da177e4SLinus Torvalds return ret; 22711da177e4SLinus Torvalds } 22721da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy); 22731da177e4SLinus Torvalds 22742760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb, 2275c32b6b8eSAshok Raj unsigned long action, void *hcpu) 2276c32b6b8eSAshok Raj { 2277c32b6b8eSAshok Raj unsigned int cpu = (unsigned long)hcpu; 2278c32b6b8eSAshok Raj 22795302c3fbSSrivatsa S. Bhat switch (action & ~CPU_TASKS_FROZEN) { 2280c32b6b8eSAshok Raj case CPU_ONLINE: 22810b275352SRafael J. Wysocki cpufreq_online(cpu); 2282c32b6b8eSAshok Raj break; 22835302c3fbSSrivatsa S. Bhat 2284c32b6b8eSAshok Raj case CPU_DOWN_PREPARE: 228515c0b4d2SRafael J. Wysocki cpufreq_offline_prepare(cpu); 22861aee40acSSrivatsa S. Bhat break; 22871aee40acSSrivatsa S. Bhat 22881aee40acSSrivatsa S. Bhat case CPU_POST_DEAD: 228915c0b4d2SRafael J. Wysocki cpufreq_offline_finish(cpu); 2290c32b6b8eSAshok Raj break; 22915302c3fbSSrivatsa S. Bhat 22925a01f2e8SVenkatesh Pallipadi case CPU_DOWN_FAILED: 22930b275352SRafael J. 
Wysocki cpufreq_online(cpu); 2294c32b6b8eSAshok Raj break; 2295c32b6b8eSAshok Raj } 2296c32b6b8eSAshok Raj return NOTIFY_OK; 2297c32b6b8eSAshok Raj } 2298c32b6b8eSAshok Raj 22999c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = { 2300c32b6b8eSAshok Raj .notifier_call = cpufreq_cpu_callback, 2301c32b6b8eSAshok Raj }; 23021da177e4SLinus Torvalds 23031da177e4SLinus Torvalds /********************************************************************* 23046f19efc0SLukasz Majewski * BOOST * 23056f19efc0SLukasz Majewski *********************************************************************/ 23066f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state) 23076f19efc0SLukasz Majewski { 23086f19efc0SLukasz Majewski struct cpufreq_frequency_table *freq_table; 23096f19efc0SLukasz Majewski struct cpufreq_policy *policy; 23106f19efc0SLukasz Majewski int ret = -EINVAL; 23116f19efc0SLukasz Majewski 2312f963735aSViresh Kumar for_each_active_policy(policy) { 23136f19efc0SLukasz Majewski freq_table = cpufreq_frequency_get_table(policy->cpu); 23146f19efc0SLukasz Majewski if (freq_table) { 23156f19efc0SLukasz Majewski ret = cpufreq_frequency_table_cpuinfo(policy, 23166f19efc0SLukasz Majewski freq_table); 23176f19efc0SLukasz Majewski if (ret) { 23186f19efc0SLukasz Majewski pr_err("%s: Policy frequency update failed\n", 23196f19efc0SLukasz Majewski __func__); 23206f19efc0SLukasz Majewski break; 23216f19efc0SLukasz Majewski } 23226f19efc0SLukasz Majewski policy->user_policy.max = policy->max; 23236f19efc0SLukasz Majewski __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 23246f19efc0SLukasz Majewski } 23256f19efc0SLukasz Majewski } 23266f19efc0SLukasz Majewski 23276f19efc0SLukasz Majewski return ret; 23286f19efc0SLukasz Majewski } 23296f19efc0SLukasz Majewski 23306f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state) 23316f19efc0SLukasz Majewski { 23326f19efc0SLukasz Majewski unsigned long flags; 23336f19efc0SLukasz Majewski int ret = 0; 23346f19efc0SLukasz Majewski 23356f19efc0SLukasz Majewski if (cpufreq_driver->boost_enabled == state) 23366f19efc0SLukasz Majewski return 0; 23376f19efc0SLukasz Majewski 23386f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 23396f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = state; 23406f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 23416f19efc0SLukasz Majewski 23426f19efc0SLukasz Majewski ret = cpufreq_driver->set_boost(state); 23436f19efc0SLukasz Majewski if (ret) { 23446f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 23456f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = !state; 23466f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 23476f19efc0SLukasz Majewski 2348e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST\n", 2349e837f9b5SJoe Perches __func__, state ? 
"enable" : "disable"); 23506f19efc0SLukasz Majewski } 23516f19efc0SLukasz Majewski 23526f19efc0SLukasz Majewski return ret; 23536f19efc0SLukasz Majewski } 23546f19efc0SLukasz Majewski 23556f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 23566f19efc0SLukasz Majewski { 23576f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 23586f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 23596f19efc0SLukasz Majewski 23606f19efc0SLukasz Majewski return 0; 23616f19efc0SLukasz Majewski } 23626f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 23636f19efc0SLukasz Majewski 236444139ed4SViresh Kumar static int create_boost_sysfs_file(void) 236544139ed4SViresh Kumar { 236644139ed4SViresh Kumar int ret; 236744139ed4SViresh Kumar 236844139ed4SViresh Kumar if (!cpufreq_boost_supported()) 236944139ed4SViresh Kumar return 0; 237044139ed4SViresh Kumar 237144139ed4SViresh Kumar /* 237244139ed4SViresh Kumar * Check if driver provides function to enable boost - 237344139ed4SViresh Kumar * if not, use cpufreq_boost_set_sw as default 237444139ed4SViresh Kumar */ 237544139ed4SViresh Kumar if (!cpufreq_driver->set_boost) 237644139ed4SViresh Kumar cpufreq_driver->set_boost = cpufreq_boost_set_sw; 237744139ed4SViresh Kumar 2378*c82bd444SViresh Kumar ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr); 237944139ed4SViresh Kumar if (ret) 238044139ed4SViresh Kumar pr_err("%s: cannot register global BOOST sysfs file\n", 238144139ed4SViresh Kumar __func__); 238244139ed4SViresh Kumar 238344139ed4SViresh Kumar return ret; 238444139ed4SViresh Kumar } 238544139ed4SViresh Kumar 238644139ed4SViresh Kumar static void remove_boost_sysfs_file(void) 238744139ed4SViresh Kumar { 238844139ed4SViresh Kumar if (cpufreq_boost_supported()) 2389*c82bd444SViresh Kumar sysfs_remove_file(cpufreq_global_kobject, &boost.attr); 239044139ed4SViresh Kumar } 239144139ed4SViresh Kumar 239244139ed4SViresh Kumar int cpufreq_enable_boost_support(void) 239344139ed4SViresh Kumar { 239444139ed4SViresh Kumar if (!cpufreq_driver) 239544139ed4SViresh Kumar return -EINVAL; 239644139ed4SViresh Kumar 239744139ed4SViresh Kumar if (cpufreq_boost_supported()) 239844139ed4SViresh Kumar return 0; 239944139ed4SViresh Kumar 240044139ed4SViresh Kumar cpufreq_driver->boost_supported = true; 240144139ed4SViresh Kumar 240244139ed4SViresh Kumar /* This will get removed on driver unregister */ 240344139ed4SViresh Kumar return create_boost_sysfs_file(); 240444139ed4SViresh Kumar } 240544139ed4SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support); 240644139ed4SViresh Kumar 24076f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 24086f19efc0SLukasz Majewski { 24096f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 24106f19efc0SLukasz Majewski } 24116f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 24126f19efc0SLukasz Majewski 24136f19efc0SLukasz Majewski /********************************************************************* 24141da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 24151da177e4SLinus Torvalds *********************************************************************/ 24161da177e4SLinus Torvalds 24171da177e4SLinus Torvalds /** 24181da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 24191da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 24201da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 
24211da177e4SLinus Torvalds * 24221da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 24231da177e4SLinus Torvalds * returns zero on success, -EEXIST when another driver got here first 24241da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 24251da177e4SLinus Torvalds * 24261da177e4SLinus Torvalds */ 2427221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 24281da177e4SLinus Torvalds { 24291da177e4SLinus Torvalds unsigned long flags; 24301da177e4SLinus Torvalds int ret; 24311da177e4SLinus Torvalds 2432a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2433a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2434a7b422cdSKonrad Rzeszutek Wilk 24351da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 24369c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 24379832235fSRafael J. Wysocki driver_data->target) || 24389832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 24391c03a2d0SViresh Kumar driver_data->target)) || 24401c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 24411da177e4SLinus Torvalds return -EINVAL; 24421da177e4SLinus Torvalds 24432d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 24441da177e4SLinus Torvalds 2445fdd320daSRafael J. Wysocki /* Protect against concurrent CPU online/offline. */ 2446fdd320daSRafael J. Wysocki get_online_cpus(); 2447fdd320daSRafael J. Wysocki 24480d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 24491c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 24500d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2451fdd320daSRafael J. Wysocki ret = -EEXIST; 2452fdd320daSRafael J. Wysocki goto out; 24531da177e4SLinus Torvalds } 24541c3d85ddSRafael J. Wysocki cpufreq_driver = driver_data; 24550d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24561da177e4SLinus Torvalds 2457bc68b7dfSViresh Kumar if (driver_data->setpolicy) 2458bc68b7dfSViresh Kumar driver_data->flags |= CPUFREQ_CONST_LOOPS; 2459bc68b7dfSViresh Kumar 246044139ed4SViresh Kumar ret = create_boost_sysfs_file(); 246144139ed4SViresh Kumar if (ret) 24626f19efc0SLukasz Majewski goto err_null_driver; 24636f19efc0SLukasz Majewski 24648a25a2fdSKay Sievers ret = subsys_interface_register(&cpufreq_interface); 24658f5bc2abSJiri Slaby if (ret) 24666f19efc0SLukasz Majewski goto err_boost_unreg; 24671da177e4SLinus Torvalds 2468ce1bcfe9SViresh Kumar if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2469ce1bcfe9SViresh Kumar list_empty(&cpufreq_policy_list)) { 24701da177e4SLinus Torvalds /* if all ->init() calls failed, unregister */ 2471ce1bcfe9SViresh Kumar pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2472e08f5f5bSGautham R Shenoy driver_data->name); 24738a25a2fdSKay Sievers goto err_if_unreg; 24741da177e4SLinus Torvalds } 24751da177e4SLinus Torvalds 247665edc68cSChandra Seetharaman register_hotcpu_notifier(&cpufreq_cpu_notifier); 24772d06d8c4SDominik Brodowski pr_debug("driver %s up and running\n", driver_data->name); 24781da177e4SLinus Torvalds 2479fdd320daSRafael J. Wysocki out: 2480fdd320daSRafael J. Wysocki put_online_cpus(); 2481fdd320daSRafael J. Wysocki return ret; 2482fdd320daSRafael J.
Wysocki 24838a25a2fdSKay Sievers err_if_unreg: 24848a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 24856f19efc0SLukasz Majewski err_boost_unreg: 248644139ed4SViresh Kumar remove_boost_sysfs_file(); 24878f5bc2abSJiri Slaby err_null_driver: 24880d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 24891c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 24900d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2491fdd320daSRafael J. Wysocki goto out; 24921da177e4SLinus Torvalds } 24931da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver); 24941da177e4SLinus Torvalds 24951da177e4SLinus Torvalds /** 24961da177e4SLinus Torvalds * cpufreq_unregister_driver - unregister the current CPUFreq driver 24971da177e4SLinus Torvalds * 24981da177e4SLinus Torvalds * Unregister the current CPUFreq driver. Only call this if you have 24991da177e4SLinus Torvalds * the right to do so, i.e. if you have succeeded in initialising before! 25001da177e4SLinus Torvalds * Returns zero if successful, and -EINVAL if the cpufreq_driver is 25011da177e4SLinus Torvalds * currently not initialised. 25021da177e4SLinus Torvalds */ 2503221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver) 25041da177e4SLinus Torvalds { 25051da177e4SLinus Torvalds unsigned long flags; 25061da177e4SLinus Torvalds 25071c3d85ddSRafael J. Wysocki if (!cpufreq_driver || (driver != cpufreq_driver)) 25081da177e4SLinus Torvalds return -EINVAL; 25091da177e4SLinus Torvalds 25102d06d8c4SDominik Brodowski pr_debug("unregistering driver %s\n", driver->name); 25111da177e4SLinus Torvalds 2512454d3a25SSebastian Andrzej Siewior /* Protect against concurrent cpu hotplug */ 2513454d3a25SSebastian Andrzej Siewior get_online_cpus(); 25148a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 251544139ed4SViresh Kumar remove_boost_sysfs_file(); 251665edc68cSChandra Seetharaman unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 25171da177e4SLinus Torvalds 25180d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25196eed9404SViresh Kumar 25201c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25216eed9404SViresh Kumar 25220d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2523454d3a25SSebastian Andrzej Siewior put_online_cpus(); 25241da177e4SLinus Torvalds 25251da177e4SLinus Torvalds return 0; 25261da177e4SLinus Torvalds } 25271da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 25285a01f2e8SVenkatesh Pallipadi 252990de2a4aSDoug Anderson /* 253090de2a4aSDoug Anderson * Stop cpufreq at shutdown to make sure it isn't holding any locks 253190de2a4aSDoug Anderson * or mutexes when secondary CPUs are halted. 
253290de2a4aSDoug Anderson */ 253390de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 253490de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 253590de2a4aSDoug Anderson }; 253690de2a4aSDoug Anderson 2537*c82bd444SViresh Kumar struct kobject *cpufreq_global_kobject; 2538*c82bd444SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject); 2539*c82bd444SViresh Kumar 25405a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 25415a01f2e8SVenkatesh Pallipadi { 2542a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2543a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2544a7b422cdSKonrad Rzeszutek Wilk 25458eec1020SViresh Kumar cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); 25468aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 25478aa84ad8SThomas Renninger 254890de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 254990de2a4aSDoug Anderson 25505a01f2e8SVenkatesh Pallipadi return 0; 25515a01f2e8SVenkatesh Pallipadi } 25525a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2553
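/*
 * Editorial sketch (not part of the core above): one way platform code
 * might use cpufreq_update_policy() when firmware or the BIOS silently
 * changes the allowed limits.  The core re-reads the current frequency
 * from the driver, re-applies the user_policy limits and re-runs
 * cpufreq_set_policy(), which ends in CPUFREQ_GOV_LIMITS for the governor.
 * The hook name foo_firmware_limits_event() is an assumption made for
 * this illustration only.
 */
static void __maybe_unused foo_firmware_limits_event(unsigned int cpu)
{
	/* Ask the core to re-evaluate the policy of @cpu. */
	cpufreq_update_policy(cpu);
}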
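/*
 * Editorial sketch (not part of the core above): a minimal frequency-table
 * based driver as it might register with cpufreq_register_driver() and
 * unregister again on module exit.  Everything prefixed foo_ (the table
 * values, the clock hook, the module functions) is an assumption made for
 * this illustration; only the cpufreq_* helpers are existing interfaces.
 */
static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

/* Stand-in for the platform's clock-setting hook (assumed). */
static int foo_hw_set_rate(unsigned int freq_khz)
{
	return 0;
}

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/*
	 * A driver whose hardware exposes turbo states could also call
	 * cpufreq_enable_boost_support() from here; this sketch leaves
	 * boost out.  Generic helper: validates the table and fills in
	 * cpuinfo and the policy limits.
	 */
	return cpufreq_generic_init(policy, foo_freq_table,
				    100000 /* transition latency in ns, assumed */);
}

static int foo_cpufreq_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	return foo_hw_set_rate(foo_freq_table[index].frequency);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_cpufreq_target_index,
	.init		= foo_cpufreq_init,
	.attr		= cpufreq_generic_attr,
};

static int __init foo_cpufreq_module_init(void)
{
	/* Returns -EEXIST if another cpufreq driver is already registered. */
	return cpufreq_register_driver(&foo_cpufreq_driver);
}

static void __exit foo_cpufreq_module_exit(void)
{
	cpufreq_unregister_driver(&foo_cpufreq_driver);
}

module_init(foo_cpufreq_module_init);
module_exit(foo_cpufreq_module_exit);
MODULE_LICENSE("GPL");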