11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/drivers/cpufreq/cpufreq.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001 Russell King 51da177e4SLinus Torvalds * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 6bb176f7dSViresh Kumar * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org> 71da177e4SLinus Torvalds * 8c32b6b8eSAshok Raj * Oct 2005 - Ashok Raj <ashok.raj@intel.com> 9c32b6b8eSAshok Raj * Added handling for CPU hotplug 108ff69732SDave Jones * Feb 2006 - Jacob Shin <jacob.shin@amd.com> 118ff69732SDave Jones * Fix handling for CPU hotplug -- affected CPUs 12c32b6b8eSAshok Raj * 131da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 141da177e4SLinus Torvalds * it under the terms of the GNU General Public License version 2 as 151da177e4SLinus Torvalds * published by the Free Software Foundation. 161da177e4SLinus Torvalds */ 171da177e4SLinus Torvalds 18db701151SViresh Kumar #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19db701151SViresh Kumar 205ff0a268SViresh Kumar #include <linux/cpu.h> 211da177e4SLinus Torvalds #include <linux/cpufreq.h> 221da177e4SLinus Torvalds #include <linux/delay.h> 231da177e4SLinus Torvalds #include <linux/device.h> 245ff0a268SViresh Kumar #include <linux/init.h> 255ff0a268SViresh Kumar #include <linux/kernel_stat.h> 265ff0a268SViresh Kumar #include <linux/module.h> 273fc54d37Sakpm@osdl.org #include <linux/mutex.h> 285ff0a268SViresh Kumar #include <linux/slab.h> 292f0aea93SViresh Kumar #include <linux/suspend.h> 3090de2a4aSDoug Anderson #include <linux/syscore_ops.h> 315ff0a268SViresh Kumar #include <linux/tick.h> 326f4f2723SThomas Renninger #include <trace/events/power.h> 336f4f2723SThomas Renninger 34b4f0676fSViresh Kumar static LIST_HEAD(cpufreq_policy_list); 35f963735aSViresh Kumar 36f963735aSViresh Kumar static inline bool policy_is_inactive(struct cpufreq_policy *policy) 37f963735aSViresh Kumar { 38f963735aSViresh Kumar return cpumask_empty(policy->cpus); 39f963735aSViresh Kumar } 40f963735aSViresh Kumar 41f963735aSViresh Kumar static bool suitable_policy(struct cpufreq_policy *policy, bool active) 42f963735aSViresh Kumar { 43f963735aSViresh Kumar return active == !policy_is_inactive(policy); 44f963735aSViresh Kumar } 45f963735aSViresh Kumar 46f963735aSViresh Kumar /* Finds Next Acive/Inactive policy */ 47f963735aSViresh Kumar static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy, 48f963735aSViresh Kumar bool active) 49f963735aSViresh Kumar { 50f963735aSViresh Kumar do { 51f963735aSViresh Kumar policy = list_next_entry(policy, policy_list); 52f963735aSViresh Kumar 53f963735aSViresh Kumar /* No more policies in the list */ 54f963735aSViresh Kumar if (&policy->policy_list == &cpufreq_policy_list) 55f963735aSViresh Kumar return NULL; 56f963735aSViresh Kumar } while (!suitable_policy(policy, active)); 57f963735aSViresh Kumar 58f963735aSViresh Kumar return policy; 59f963735aSViresh Kumar } 60f963735aSViresh Kumar 61f963735aSViresh Kumar static struct cpufreq_policy *first_policy(bool active) 62f963735aSViresh Kumar { 63f963735aSViresh Kumar struct cpufreq_policy *policy; 64f963735aSViresh Kumar 65f963735aSViresh Kumar /* No policies in the list */ 66f963735aSViresh Kumar if (list_empty(&cpufreq_policy_list)) 67f963735aSViresh Kumar return NULL; 68f963735aSViresh Kumar 69f963735aSViresh Kumar policy = list_first_entry(&cpufreq_policy_list, typeof(*policy), 70f963735aSViresh Kumar policy_list); 71f963735aSViresh Kumar 72f963735aSViresh Kumar if (!suitable_policy(policy, active)) 73f963735aSViresh Kumar policy = next_policy(policy, active); 74f963735aSViresh Kumar 75f963735aSViresh Kumar return policy; 76f963735aSViresh Kumar } 77f963735aSViresh Kumar 78f963735aSViresh Kumar /* Macros to iterate over CPU policies */ 79f963735aSViresh Kumar #define for_each_suitable_policy(__policy, __active) \ 80f963735aSViresh Kumar for (__policy = first_policy(__active); \ 81f963735aSViresh Kumar __policy; \ 82f963735aSViresh Kumar __policy = next_policy(__policy, __active)) 83f963735aSViresh Kumar 84f963735aSViresh Kumar #define for_each_active_policy(__policy) \ 85f963735aSViresh Kumar for_each_suitable_policy(__policy, true) 86f963735aSViresh Kumar #define for_each_inactive_policy(__policy) \ 87f963735aSViresh Kumar for_each_suitable_policy(__policy, false) 88f963735aSViresh Kumar 89b4f0676fSViresh Kumar #define for_each_policy(__policy) \ 90b4f0676fSViresh Kumar list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) 91b4f0676fSViresh Kumar 92f7b27061SViresh Kumar /* Iterate over governors */ 93f7b27061SViresh Kumar static LIST_HEAD(cpufreq_governor_list); 94f7b27061SViresh Kumar #define for_each_governor(__governor) \ 95f7b27061SViresh Kumar list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) 96f7b27061SViresh Kumar 971da177e4SLinus Torvalds /** 98cd878479SDave Jones * The "cpufreq driver" - the arch- or hardware-dependent low 991da177e4SLinus Torvalds * level driver of CPUFreq support, and its spinlock. This lock 1001da177e4SLinus Torvalds * also protects the cpufreq_cpu_data array. 1011da177e4SLinus Torvalds */ 1021c3d85ddSRafael J. Wysocki static struct cpufreq_driver *cpufreq_driver; 1037a6aedfaSMike Travis static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 104bb176f7dSViresh Kumar static DEFINE_RWLOCK(cpufreq_driver_lock); 1056f1e4efdSJane Li DEFINE_MUTEX(cpufreq_governor_lock); 106bb176f7dSViresh Kumar 1072f0aea93SViresh Kumar /* Flag to suspend/resume CPUFreq governors */ 1082f0aea93SViresh Kumar static bool cpufreq_suspended; 1091da177e4SLinus Torvalds 1109c0ebcf7SViresh Kumar static inline bool has_target(void) 1119c0ebcf7SViresh Kumar { 1129c0ebcf7SViresh Kumar return cpufreq_driver->target_index || cpufreq_driver->target; 1139c0ebcf7SViresh Kumar } 1149c0ebcf7SViresh Kumar 1151da177e4SLinus Torvalds /* internal prototypes */ 11629464f28SDave Jones static int __cpufreq_governor(struct cpufreq_policy *policy, 11729464f28SDave Jones unsigned int event); 118d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 11965f27f38SDavid Howells static void handle_update(struct work_struct *work); 1201da177e4SLinus Torvalds 1211da177e4SLinus Torvalds /** 1221da177e4SLinus Torvalds * Two notifier lists: the "policy" list is involved in the 1231da177e4SLinus Torvalds * validation process for a new CPU frequency policy; the 1241da177e4SLinus Torvalds * "transition" list for kernel code that needs to handle 1251da177e4SLinus Torvalds * changes to devices when the CPU clock speed changes. 1261da177e4SLinus Torvalds * The mutex locks both lists. 1271da177e4SLinus Torvalds */ 128e041c683SAlan Stern static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); 129b4dfdbb3SAlan Stern static struct srcu_notifier_head cpufreq_transition_notifier_list; 1301da177e4SLinus Torvalds 13174212ca4SCesar Eduardo Barros static bool init_cpufreq_transition_notifier_list_called; 132b4dfdbb3SAlan Stern static int __init init_cpufreq_transition_notifier_list(void) 133b4dfdbb3SAlan Stern { 134b4dfdbb3SAlan Stern srcu_init_notifier_head(&cpufreq_transition_notifier_list); 13574212ca4SCesar Eduardo Barros init_cpufreq_transition_notifier_list_called = true; 136b4dfdbb3SAlan Stern return 0; 137b4dfdbb3SAlan Stern } 138b3438f82SLinus Torvalds pure_initcall(init_cpufreq_transition_notifier_list); 1391da177e4SLinus Torvalds 140a7b422cdSKonrad Rzeszutek Wilk static int off __read_mostly; 141da584455SViresh Kumar static int cpufreq_disabled(void) 142a7b422cdSKonrad Rzeszutek Wilk { 143a7b422cdSKonrad Rzeszutek Wilk return off; 144a7b422cdSKonrad Rzeszutek Wilk } 145a7b422cdSKonrad Rzeszutek Wilk void disable_cpufreq(void) 146a7b422cdSKonrad Rzeszutek Wilk { 147a7b422cdSKonrad Rzeszutek Wilk off = 1; 148a7b422cdSKonrad Rzeszutek Wilk } 1493fc54d37Sakpm@osdl.org static DEFINE_MUTEX(cpufreq_governor_mutex); 1501da177e4SLinus Torvalds 1514d5dcc42SViresh Kumar bool have_governor_per_policy(void) 1524d5dcc42SViresh Kumar { 1530b981e70SViresh Kumar return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); 1544d5dcc42SViresh Kumar } 1553f869d6dSViresh Kumar EXPORT_SYMBOL_GPL(have_governor_per_policy); 1564d5dcc42SViresh Kumar 157944e9a03SViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) 158944e9a03SViresh Kumar { 159944e9a03SViresh Kumar if (have_governor_per_policy()) 160944e9a03SViresh Kumar return &policy->kobj; 161944e9a03SViresh Kumar else 162944e9a03SViresh Kumar return cpufreq_global_kobject; 163944e9a03SViresh Kumar } 164944e9a03SViresh Kumar EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 165944e9a03SViresh Kumar 1665a31d594SViresh Kumar struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 1675a31d594SViresh Kumar { 1685a31d594SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1695a31d594SViresh Kumar 1705a31d594SViresh Kumar return policy && !policy_is_inactive(policy) ? 1715a31d594SViresh Kumar policy->freq_table : NULL; 1725a31d594SViresh Kumar } 1735a31d594SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 1745a31d594SViresh Kumar 17572a4ce34SViresh Kumar static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 17672a4ce34SViresh Kumar { 17772a4ce34SViresh Kumar u64 idle_time; 17872a4ce34SViresh Kumar u64 cur_wall_time; 17972a4ce34SViresh Kumar u64 busy_time; 18072a4ce34SViresh Kumar 18172a4ce34SViresh Kumar cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); 18272a4ce34SViresh Kumar 18372a4ce34SViresh Kumar busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; 18472a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; 18572a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; 18672a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; 18772a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; 18872a4ce34SViresh Kumar busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; 18972a4ce34SViresh Kumar 19072a4ce34SViresh Kumar idle_time = cur_wall_time - busy_time; 19172a4ce34SViresh Kumar if (wall) 19272a4ce34SViresh Kumar *wall = cputime_to_usecs(cur_wall_time); 19372a4ce34SViresh Kumar 19472a4ce34SViresh Kumar return cputime_to_usecs(idle_time); 19572a4ce34SViresh Kumar } 19672a4ce34SViresh Kumar 19772a4ce34SViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) 19872a4ce34SViresh Kumar { 19972a4ce34SViresh Kumar u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); 20072a4ce34SViresh Kumar 20172a4ce34SViresh Kumar if (idle_time == -1ULL) 20272a4ce34SViresh Kumar return get_cpu_idle_time_jiffy(cpu, wall); 20372a4ce34SViresh Kumar else if (!io_busy) 20472a4ce34SViresh Kumar idle_time += get_cpu_iowait_time_us(cpu, wall); 20572a4ce34SViresh Kumar 20672a4ce34SViresh Kumar return idle_time; 20772a4ce34SViresh Kumar } 20872a4ce34SViresh Kumar EXPORT_SYMBOL_GPL(get_cpu_idle_time); 20972a4ce34SViresh Kumar 21070e9e778SViresh Kumar /* 21170e9e778SViresh Kumar * This is a generic cpufreq init() routine which can be used by cpufreq 21270e9e778SViresh Kumar * drivers of SMP systems. It will do following: 21370e9e778SViresh Kumar * - validate & show freq table passed 21470e9e778SViresh Kumar * - set policies transition latency 21570e9e778SViresh Kumar * - policy->cpus with all possible CPUs 21670e9e778SViresh Kumar */ 21770e9e778SViresh Kumar int cpufreq_generic_init(struct cpufreq_policy *policy, 21870e9e778SViresh Kumar struct cpufreq_frequency_table *table, 21970e9e778SViresh Kumar unsigned int transition_latency) 22070e9e778SViresh Kumar { 22170e9e778SViresh Kumar int ret; 22270e9e778SViresh Kumar 22370e9e778SViresh Kumar ret = cpufreq_table_validate_and_show(policy, table); 22470e9e778SViresh Kumar if (ret) { 22570e9e778SViresh Kumar pr_err("%s: invalid frequency table: %d\n", __func__, ret); 22670e9e778SViresh Kumar return ret; 22770e9e778SViresh Kumar } 22870e9e778SViresh Kumar 22970e9e778SViresh Kumar policy->cpuinfo.transition_latency = transition_latency; 23070e9e778SViresh Kumar 23170e9e778SViresh Kumar /* 23258405af6SShailendra Verma * The driver only supports the SMP configuration where all processors 23370e9e778SViresh Kumar * share the clock and voltage and clock. 23470e9e778SViresh Kumar */ 23570e9e778SViresh Kumar cpumask_setall(policy->cpus); 23670e9e778SViresh Kumar 23770e9e778SViresh Kumar return 0; 23870e9e778SViresh Kumar } 23970e9e778SViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_init); 24070e9e778SViresh Kumar 241988bed09SViresh Kumar /* Only for cpufreq core internal use */ 242988bed09SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) 243652ed95dSViresh Kumar { 244652ed95dSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 245652ed95dSViresh Kumar 246988bed09SViresh Kumar return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; 247988bed09SViresh Kumar } 248988bed09SViresh Kumar 249988bed09SViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu) 250988bed09SViresh Kumar { 251988bed09SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); 252988bed09SViresh Kumar 253652ed95dSViresh Kumar if (!policy || IS_ERR(policy->clk)) { 254e837f9b5SJoe Perches pr_err("%s: No %s associated to cpu: %d\n", 255e837f9b5SJoe Perches __func__, policy ? "clk" : "policy", cpu); 256652ed95dSViresh Kumar return 0; 257652ed95dSViresh Kumar } 258652ed95dSViresh Kumar 259652ed95dSViresh Kumar return clk_get_rate(policy->clk) / 1000; 260652ed95dSViresh Kumar } 261652ed95dSViresh Kumar EXPORT_SYMBOL_GPL(cpufreq_generic_get); 262652ed95dSViresh Kumar 26350e9c852SViresh Kumar /** 26450e9c852SViresh Kumar * cpufreq_cpu_get: returns policy for a cpu and marks it busy. 26550e9c852SViresh Kumar * 26650e9c852SViresh Kumar * @cpu: cpu to find policy for. 26750e9c852SViresh Kumar * 26850e9c852SViresh Kumar * This returns policy for 'cpu', returns NULL if it doesn't exist. 26950e9c852SViresh Kumar * It also increments the kobject reference count to mark it busy and so would 27050e9c852SViresh Kumar * require a corresponding call to cpufreq_cpu_put() to decrement it back. 27150e9c852SViresh Kumar * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be 27250e9c852SViresh Kumar * freed as that depends on the kobj count. 27350e9c852SViresh Kumar * 27450e9c852SViresh Kumar * Return: A valid policy on success, otherwise NULL on failure. 27550e9c852SViresh Kumar */ 2766eed9404SViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 2771da177e4SLinus Torvalds { 2786eed9404SViresh Kumar struct cpufreq_policy *policy = NULL; 2791da177e4SLinus Torvalds unsigned long flags; 2801da177e4SLinus Torvalds 2811b947c90SViresh Kumar if (WARN_ON(cpu >= nr_cpu_ids)) 2826eed9404SViresh Kumar return NULL; 2836eed9404SViresh Kumar 2841da177e4SLinus Torvalds /* get the cpufreq driver */ 2850d1857a1SNathan Zimmer read_lock_irqsave(&cpufreq_driver_lock, flags); 2861da177e4SLinus Torvalds 2876eed9404SViresh Kumar if (cpufreq_driver) { 2881da177e4SLinus Torvalds /* get the CPU */ 289988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 2906eed9404SViresh Kumar if (policy) 2916eed9404SViresh Kumar kobject_get(&policy->kobj); 2926eed9404SViresh Kumar } 2936eed9404SViresh Kumar 2946eed9404SViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 2951da177e4SLinus Torvalds 2963a3e9e06SViresh Kumar return policy; 297a9144436SStephen Boyd } 2981da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 2991da177e4SLinus Torvalds 30050e9c852SViresh Kumar /** 30150e9c852SViresh Kumar * cpufreq_cpu_put: Decrements the usage count of a policy 30250e9c852SViresh Kumar * 30350e9c852SViresh Kumar * @policy: policy earlier returned by cpufreq_cpu_get(). 30450e9c852SViresh Kumar * 30550e9c852SViresh Kumar * This decrements the kobject reference count incremented earlier by calling 30650e9c852SViresh Kumar * cpufreq_cpu_get(). 30750e9c852SViresh Kumar */ 3083a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy) 309a9144436SStephen Boyd { 3106eed9404SViresh Kumar kobject_put(&policy->kobj); 311a9144436SStephen Boyd } 3121da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 3131da177e4SLinus Torvalds 3141da177e4SLinus Torvalds /********************************************************************* 3151da177e4SLinus Torvalds * EXTERNALLY AFFECTING FREQUENCY CHANGES * 3161da177e4SLinus Torvalds *********************************************************************/ 3171da177e4SLinus Torvalds 3181da177e4SLinus Torvalds /** 3191da177e4SLinus Torvalds * adjust_jiffies - adjust the system "loops_per_jiffy" 3201da177e4SLinus Torvalds * 3211da177e4SLinus Torvalds * This function alters the system "loops_per_jiffy" for the clock 3221da177e4SLinus Torvalds * speed change. Note that loops_per_jiffy cannot be updated on SMP 3231da177e4SLinus Torvalds * systems as each CPU might be scaled differently. So, use the arch 3241da177e4SLinus Torvalds * per-CPU loops_per_jiffy value wherever possible. 3251da177e4SLinus Torvalds */ 32639c132eeSViresh Kumar static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 32739c132eeSViresh Kumar { 3281da177e4SLinus Torvalds #ifndef CONFIG_SMP 3291da177e4SLinus Torvalds static unsigned long l_p_j_ref; 3301da177e4SLinus Torvalds static unsigned int l_p_j_ref_freq; 3311da177e4SLinus Torvalds 3321da177e4SLinus Torvalds if (ci->flags & CPUFREQ_CONST_LOOPS) 3331da177e4SLinus Torvalds return; 3341da177e4SLinus Torvalds 3351da177e4SLinus Torvalds if (!l_p_j_ref_freq) { 3361da177e4SLinus Torvalds l_p_j_ref = loops_per_jiffy; 3371da177e4SLinus Torvalds l_p_j_ref_freq = ci->old; 338e837f9b5SJoe Perches pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", 339e837f9b5SJoe Perches l_p_j_ref, l_p_j_ref_freq); 3401da177e4SLinus Torvalds } 3410b443eadSViresh Kumar if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { 342e08f5f5bSGautham R Shenoy loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, 343e08f5f5bSGautham R Shenoy ci->new); 344e837f9b5SJoe Perches pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", 345e837f9b5SJoe Perches loops_per_jiffy, ci->new); 3461da177e4SLinus Torvalds } 3471da177e4SLinus Torvalds #endif 34839c132eeSViresh Kumar } 3491da177e4SLinus Torvalds 3500956df9cSViresh Kumar static void __cpufreq_notify_transition(struct cpufreq_policy *policy, 351b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 3521da177e4SLinus Torvalds { 3531da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 3541da177e4SLinus Torvalds 355d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 356d5aaffa9SDirk Brandewie return; 357d5aaffa9SDirk Brandewie 3581c3d85ddSRafael J. Wysocki freqs->flags = cpufreq_driver->flags; 3592d06d8c4SDominik Brodowski pr_debug("notification %u of frequency transition to %u kHz\n", 360e4472cb3SDave Jones state, freqs->new); 3611da177e4SLinus Torvalds 3621da177e4SLinus Torvalds switch (state) { 363e4472cb3SDave Jones 3641da177e4SLinus Torvalds case CPUFREQ_PRECHANGE: 365e4472cb3SDave Jones /* detect if the driver reported a value as "old frequency" 366e4472cb3SDave Jones * which is not equal to what the cpufreq core thinks is 367e4472cb3SDave Jones * "old frequency". 3681da177e4SLinus Torvalds */ 3691c3d85ddSRafael J. Wysocki if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 370e4472cb3SDave Jones if ((policy) && (policy->cpu == freqs->cpu) && 371e4472cb3SDave Jones (policy->cur) && (policy->cur != freqs->old)) { 372e837f9b5SJoe Perches pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", 373e4472cb3SDave Jones freqs->old, policy->cur); 374e4472cb3SDave Jones freqs->old = policy->cur; 3751da177e4SLinus Torvalds } 3761da177e4SLinus Torvalds } 377b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 378e4472cb3SDave Jones CPUFREQ_PRECHANGE, freqs); 3791da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 3801da177e4SLinus Torvalds break; 381e4472cb3SDave Jones 3821da177e4SLinus Torvalds case CPUFREQ_POSTCHANGE: 3831da177e4SLinus Torvalds adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 384e837f9b5SJoe Perches pr_debug("FREQ: %lu - CPU: %lu\n", 385e837f9b5SJoe Perches (unsigned long)freqs->new, (unsigned long)freqs->cpu); 38625e41933SThomas Renninger trace_cpu_frequency(freqs->new, freqs->cpu); 387b4dfdbb3SAlan Stern srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 388e4472cb3SDave Jones CPUFREQ_POSTCHANGE, freqs); 389e4472cb3SDave Jones if (likely(policy) && likely(policy->cpu == freqs->cpu)) 390e4472cb3SDave Jones policy->cur = freqs->new; 3911da177e4SLinus Torvalds break; 3921da177e4SLinus Torvalds } 3931da177e4SLinus Torvalds } 394bb176f7dSViresh Kumar 395b43a7ffbSViresh Kumar /** 396b43a7ffbSViresh Kumar * cpufreq_notify_transition - call notifier chain and adjust_jiffies 397b43a7ffbSViresh Kumar * on frequency transition. 398b43a7ffbSViresh Kumar * 399b43a7ffbSViresh Kumar * This function calls the transition notifiers and the "adjust_jiffies" 400b43a7ffbSViresh Kumar * function. It is called twice on all CPU frequency changes that have 401b43a7ffbSViresh Kumar * external effects. 402b43a7ffbSViresh Kumar */ 403236a9800SViresh Kumar static void cpufreq_notify_transition(struct cpufreq_policy *policy, 404b43a7ffbSViresh Kumar struct cpufreq_freqs *freqs, unsigned int state) 405b43a7ffbSViresh Kumar { 406b43a7ffbSViresh Kumar for_each_cpu(freqs->cpu, policy->cpus) 407b43a7ffbSViresh Kumar __cpufreq_notify_transition(policy, freqs, state); 408b43a7ffbSViresh Kumar } 4091da177e4SLinus Torvalds 410f7ba3b41SViresh Kumar /* Do post notifications when there are chances that transition has failed */ 411236a9800SViresh Kumar static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, 412f7ba3b41SViresh Kumar struct cpufreq_freqs *freqs, int transition_failed) 413f7ba3b41SViresh Kumar { 414f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 415f7ba3b41SViresh Kumar if (!transition_failed) 416f7ba3b41SViresh Kumar return; 417f7ba3b41SViresh Kumar 418f7ba3b41SViresh Kumar swap(freqs->old, freqs->new); 419f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 420f7ba3b41SViresh Kumar cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 421f7ba3b41SViresh Kumar } 422f7ba3b41SViresh Kumar 42312478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, 42412478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs) 42512478cf0SSrivatsa S. Bhat { 426ca654dc3SSrivatsa S. Bhat 427ca654dc3SSrivatsa S. Bhat /* 428ca654dc3SSrivatsa S. Bhat * Catch double invocations of _begin() which lead to self-deadlock. 429ca654dc3SSrivatsa S. Bhat * ASYNC_NOTIFICATION drivers are left out because the cpufreq core 430ca654dc3SSrivatsa S. Bhat * doesn't invoke _begin() on their behalf, and hence the chances of 431ca654dc3SSrivatsa S. Bhat * double invocations are very low. Moreover, there are scenarios 432ca654dc3SSrivatsa S. Bhat * where these checks can emit false-positive warnings in these 433ca654dc3SSrivatsa S. Bhat * drivers; so we avoid that by skipping them altogether. 434ca654dc3SSrivatsa S. Bhat */ 435ca654dc3SSrivatsa S. Bhat WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) 436ca654dc3SSrivatsa S. Bhat && current == policy->transition_task); 437ca654dc3SSrivatsa S. Bhat 43812478cf0SSrivatsa S. Bhat wait: 43912478cf0SSrivatsa S. Bhat wait_event(policy->transition_wait, !policy->transition_ongoing); 44012478cf0SSrivatsa S. Bhat 44112478cf0SSrivatsa S. Bhat spin_lock(&policy->transition_lock); 44212478cf0SSrivatsa S. Bhat 44312478cf0SSrivatsa S. Bhat if (unlikely(policy->transition_ongoing)) { 44412478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 44512478cf0SSrivatsa S. Bhat goto wait; 44612478cf0SSrivatsa S. Bhat } 44712478cf0SSrivatsa S. Bhat 44812478cf0SSrivatsa S. Bhat policy->transition_ongoing = true; 449ca654dc3SSrivatsa S. Bhat policy->transition_task = current; 45012478cf0SSrivatsa S. Bhat 45112478cf0SSrivatsa S. Bhat spin_unlock(&policy->transition_lock); 45212478cf0SSrivatsa S. Bhat 45312478cf0SSrivatsa S. Bhat cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 45412478cf0SSrivatsa S. Bhat } 45512478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); 45612478cf0SSrivatsa S. Bhat 45712478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy, 45812478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs, int transition_failed) 45912478cf0SSrivatsa S. Bhat { 46012478cf0SSrivatsa S. Bhat if (unlikely(WARN_ON(!policy->transition_ongoing))) 46112478cf0SSrivatsa S. Bhat return; 46212478cf0SSrivatsa S. Bhat 46312478cf0SSrivatsa S. Bhat cpufreq_notify_post_transition(policy, freqs, transition_failed); 46412478cf0SSrivatsa S. Bhat 46512478cf0SSrivatsa S. Bhat policy->transition_ongoing = false; 466ca654dc3SSrivatsa S. Bhat policy->transition_task = NULL; 46712478cf0SSrivatsa S. Bhat 46812478cf0SSrivatsa S. Bhat wake_up(&policy->transition_wait); 46912478cf0SSrivatsa S. Bhat } 47012478cf0SSrivatsa S. Bhat EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); 47112478cf0SSrivatsa S. Bhat 4721da177e4SLinus Torvalds 4731da177e4SLinus Torvalds /********************************************************************* 4741da177e4SLinus Torvalds * SYSFS INTERFACE * 4751da177e4SLinus Torvalds *********************************************************************/ 4768a5c74a1SRashika Kheria static ssize_t show_boost(struct kobject *kobj, 4776f19efc0SLukasz Majewski struct attribute *attr, char *buf) 4786f19efc0SLukasz Majewski { 4796f19efc0SLukasz Majewski return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); 4806f19efc0SLukasz Majewski } 4816f19efc0SLukasz Majewski 4826f19efc0SLukasz Majewski static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, 4836f19efc0SLukasz Majewski const char *buf, size_t count) 4846f19efc0SLukasz Majewski { 4856f19efc0SLukasz Majewski int ret, enable; 4866f19efc0SLukasz Majewski 4876f19efc0SLukasz Majewski ret = sscanf(buf, "%d", &enable); 4886f19efc0SLukasz Majewski if (ret != 1 || enable < 0 || enable > 1) 4896f19efc0SLukasz Majewski return -EINVAL; 4906f19efc0SLukasz Majewski 4916f19efc0SLukasz Majewski if (cpufreq_boost_trigger_state(enable)) { 492e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST!\n", 493e837f9b5SJoe Perches __func__, enable ? "enable" : "disable"); 4946f19efc0SLukasz Majewski return -EINVAL; 4956f19efc0SLukasz Majewski } 4966f19efc0SLukasz Majewski 497e837f9b5SJoe Perches pr_debug("%s: cpufreq BOOST %s\n", 498e837f9b5SJoe Perches __func__, enable ? "enabled" : "disabled"); 4996f19efc0SLukasz Majewski 5006f19efc0SLukasz Majewski return count; 5016f19efc0SLukasz Majewski } 5026f19efc0SLukasz Majewski define_one_global_rw(boost); 5031da177e4SLinus Torvalds 50442f91fa1SViresh Kumar static struct cpufreq_governor *find_governor(const char *str_governor) 5053bcb09a3SJeremy Fitzhardinge { 5063bcb09a3SJeremy Fitzhardinge struct cpufreq_governor *t; 5073bcb09a3SJeremy Fitzhardinge 508f7b27061SViresh Kumar for_each_governor(t) 5097c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 5103bcb09a3SJeremy Fitzhardinge return t; 5113bcb09a3SJeremy Fitzhardinge 5123bcb09a3SJeremy Fitzhardinge return NULL; 5133bcb09a3SJeremy Fitzhardinge } 5143bcb09a3SJeremy Fitzhardinge 5151da177e4SLinus Torvalds /** 5161da177e4SLinus Torvalds * cpufreq_parse_governor - parse a governor string 5171da177e4SLinus Torvalds */ 5181da177e4SLinus Torvalds static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, 5191da177e4SLinus Torvalds struct cpufreq_governor **governor) 5201da177e4SLinus Torvalds { 5213bcb09a3SJeremy Fitzhardinge int err = -EINVAL; 5223bcb09a3SJeremy Fitzhardinge 5231c3d85ddSRafael J. Wysocki if (!cpufreq_driver) 5243bcb09a3SJeremy Fitzhardinge goto out; 5253bcb09a3SJeremy Fitzhardinge 5261c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 5277c4f4539SRasmus Villemoes if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 5281da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_PERFORMANCE; 5293bcb09a3SJeremy Fitzhardinge err = 0; 5307c4f4539SRasmus Villemoes } else if (!strncasecmp(str_governor, "powersave", 531e08f5f5bSGautham R Shenoy CPUFREQ_NAME_LEN)) { 5321da177e4SLinus Torvalds *policy = CPUFREQ_POLICY_POWERSAVE; 5333bcb09a3SJeremy Fitzhardinge err = 0; 5341da177e4SLinus Torvalds } 5352e1cc3a5SViresh Kumar } else { 5361da177e4SLinus Torvalds struct cpufreq_governor *t; 5373bcb09a3SJeremy Fitzhardinge 5383fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 5393bcb09a3SJeremy Fitzhardinge 54042f91fa1SViresh Kumar t = find_governor(str_governor); 5413bcb09a3SJeremy Fitzhardinge 542ea714970SJeremy Fitzhardinge if (t == NULL) { 543ea714970SJeremy Fitzhardinge int ret; 544ea714970SJeremy Fitzhardinge 545ea714970SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5461a8e1463SKees Cook ret = request_module("cpufreq_%s", str_governor); 547ea714970SJeremy Fitzhardinge mutex_lock(&cpufreq_governor_mutex); 548ea714970SJeremy Fitzhardinge 549ea714970SJeremy Fitzhardinge if (ret == 0) 55042f91fa1SViresh Kumar t = find_governor(str_governor); 551ea714970SJeremy Fitzhardinge } 552ea714970SJeremy Fitzhardinge 5533bcb09a3SJeremy Fitzhardinge if (t != NULL) { 5541da177e4SLinus Torvalds *governor = t; 5553bcb09a3SJeremy Fitzhardinge err = 0; 5561da177e4SLinus Torvalds } 5573bcb09a3SJeremy Fitzhardinge 5583bcb09a3SJeremy Fitzhardinge mutex_unlock(&cpufreq_governor_mutex); 5591da177e4SLinus Torvalds } 5601da177e4SLinus Torvalds out: 5613bcb09a3SJeremy Fitzhardinge return err; 5621da177e4SLinus Torvalds } 5631da177e4SLinus Torvalds 5641da177e4SLinus Torvalds /** 565e08f5f5bSGautham R Shenoy * cpufreq_per_cpu_attr_read() / show_##file_name() - 566e08f5f5bSGautham R Shenoy * print out cpufreq information 5671da177e4SLinus Torvalds * 5681da177e4SLinus Torvalds * Write out information from cpufreq_driver->policy[cpu]; object must be 5691da177e4SLinus Torvalds * "unsigned int". 5701da177e4SLinus Torvalds */ 5711da177e4SLinus Torvalds 5721da177e4SLinus Torvalds #define show_one(file_name, object) \ 5731da177e4SLinus Torvalds static ssize_t show_##file_name \ 5741da177e4SLinus Torvalds (struct cpufreq_policy *policy, char *buf) \ 5751da177e4SLinus Torvalds { \ 5761da177e4SLinus Torvalds return sprintf(buf, "%u\n", policy->object); \ 5771da177e4SLinus Torvalds } 5781da177e4SLinus Torvalds 5791da177e4SLinus Torvalds show_one(cpuinfo_min_freq, cpuinfo.min_freq); 5801da177e4SLinus Torvalds show_one(cpuinfo_max_freq, cpuinfo.max_freq); 581ed129784SThomas Renninger show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 5821da177e4SLinus Torvalds show_one(scaling_min_freq, min); 5831da177e4SLinus Torvalds show_one(scaling_max_freq, max); 584c034b02eSDirk Brandewie 58509347b29SViresh Kumar static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) 586c034b02eSDirk Brandewie { 587c034b02eSDirk Brandewie ssize_t ret; 588c034b02eSDirk Brandewie 589c034b02eSDirk Brandewie if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 590c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); 591c034b02eSDirk Brandewie else 592c034b02eSDirk Brandewie ret = sprintf(buf, "%u\n", policy->cur); 593c034b02eSDirk Brandewie return ret; 594c034b02eSDirk Brandewie } 5951da177e4SLinus Torvalds 596037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 5973a3e9e06SViresh Kumar struct cpufreq_policy *new_policy); 5987970e08bSThomas Renninger 5991da177e4SLinus Torvalds /** 6001da177e4SLinus Torvalds * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 6011da177e4SLinus Torvalds */ 6021da177e4SLinus Torvalds #define store_one(file_name, object) \ 6031da177e4SLinus Torvalds static ssize_t store_##file_name \ 6041da177e4SLinus Torvalds (struct cpufreq_policy *policy, const char *buf, size_t count) \ 6051da177e4SLinus Torvalds { \ 606619c144cSVince Hsu int ret, temp; \ 6071da177e4SLinus Torvalds struct cpufreq_policy new_policy; \ 6081da177e4SLinus Torvalds \ 6098fa5b631SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); \ 6101da177e4SLinus Torvalds \ 6111da177e4SLinus Torvalds ret = sscanf(buf, "%u", &new_policy.object); \ 6121da177e4SLinus Torvalds if (ret != 1) \ 6131da177e4SLinus Torvalds return -EINVAL; \ 6141da177e4SLinus Torvalds \ 615619c144cSVince Hsu temp = new_policy.object; \ 616037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); \ 617619c144cSVince Hsu if (!ret) \ 618619c144cSVince Hsu policy->user_policy.object = temp; \ 6191da177e4SLinus Torvalds \ 6201da177e4SLinus Torvalds return ret ? ret : count; \ 6211da177e4SLinus Torvalds } 6221da177e4SLinus Torvalds 6231da177e4SLinus Torvalds store_one(scaling_min_freq, min); 6241da177e4SLinus Torvalds store_one(scaling_max_freq, max); 6251da177e4SLinus Torvalds 6261da177e4SLinus Torvalds /** 6271da177e4SLinus Torvalds * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 6281da177e4SLinus Torvalds */ 629e08f5f5bSGautham R Shenoy static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 630e08f5f5bSGautham R Shenoy char *buf) 6311da177e4SLinus Torvalds { 632d92d50a4SViresh Kumar unsigned int cur_freq = __cpufreq_get(policy); 6331da177e4SLinus Torvalds if (!cur_freq) 6341da177e4SLinus Torvalds return sprintf(buf, "<unknown>"); 6351da177e4SLinus Torvalds return sprintf(buf, "%u\n", cur_freq); 6361da177e4SLinus Torvalds } 6371da177e4SLinus Torvalds 6381da177e4SLinus Torvalds /** 6391da177e4SLinus Torvalds * show_scaling_governor - show the current policy for the specified CPU 6401da177e4SLinus Torvalds */ 641905d77cdSDave Jones static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) 6421da177e4SLinus Torvalds { 6431da177e4SLinus Torvalds if (policy->policy == CPUFREQ_POLICY_POWERSAVE) 6441da177e4SLinus Torvalds return sprintf(buf, "powersave\n"); 6451da177e4SLinus Torvalds else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) 6461da177e4SLinus Torvalds return sprintf(buf, "performance\n"); 6471da177e4SLinus Torvalds else if (policy->governor) 6484b972f0bSviresh kumar return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", 64929464f28SDave Jones policy->governor->name); 6501da177e4SLinus Torvalds return -EINVAL; 6511da177e4SLinus Torvalds } 6521da177e4SLinus Torvalds 6531da177e4SLinus Torvalds /** 6541da177e4SLinus Torvalds * store_scaling_governor - store policy for the specified CPU 6551da177e4SLinus Torvalds */ 6561da177e4SLinus Torvalds static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 6571da177e4SLinus Torvalds const char *buf, size_t count) 6581da177e4SLinus Torvalds { 6595136fa56SSrivatsa S. Bhat int ret; 6601da177e4SLinus Torvalds char str_governor[16]; 6611da177e4SLinus Torvalds struct cpufreq_policy new_policy; 6621da177e4SLinus Torvalds 6638fa5b631SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 6641da177e4SLinus Torvalds 6651da177e4SLinus Torvalds ret = sscanf(buf, "%15s", str_governor); 6661da177e4SLinus Torvalds if (ret != 1) 6671da177e4SLinus Torvalds return -EINVAL; 6681da177e4SLinus Torvalds 669e08f5f5bSGautham R Shenoy if (cpufreq_parse_governor(str_governor, &new_policy.policy, 670e08f5f5bSGautham R Shenoy &new_policy.governor)) 6711da177e4SLinus Torvalds return -EINVAL; 6721da177e4SLinus Torvalds 673037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 674*88dc4384SViresh Kumar return ret ? ret : count; 6751da177e4SLinus Torvalds } 6761da177e4SLinus Torvalds 6771da177e4SLinus Torvalds /** 6781da177e4SLinus Torvalds * show_scaling_driver - show the cpufreq driver currently loaded 6791da177e4SLinus Torvalds */ 6801da177e4SLinus Torvalds static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) 6811da177e4SLinus Torvalds { 6821c3d85ddSRafael J. Wysocki return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); 6831da177e4SLinus Torvalds } 6841da177e4SLinus Torvalds 6851da177e4SLinus Torvalds /** 6861da177e4SLinus Torvalds * show_scaling_available_governors - show the available CPUfreq governors 6871da177e4SLinus Torvalds */ 6881da177e4SLinus Torvalds static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, 6891da177e4SLinus Torvalds char *buf) 6901da177e4SLinus Torvalds { 6911da177e4SLinus Torvalds ssize_t i = 0; 6921da177e4SLinus Torvalds struct cpufreq_governor *t; 6931da177e4SLinus Torvalds 6949c0ebcf7SViresh Kumar if (!has_target()) { 6951da177e4SLinus Torvalds i += sprintf(buf, "performance powersave"); 6961da177e4SLinus Torvalds goto out; 6971da177e4SLinus Torvalds } 6981da177e4SLinus Torvalds 699f7b27061SViresh Kumar for_each_governor(t) { 70029464f28SDave Jones if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 70129464f28SDave Jones - (CPUFREQ_NAME_LEN + 2))) 7021da177e4SLinus Torvalds goto out; 7034b972f0bSviresh kumar i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); 7041da177e4SLinus Torvalds } 7051da177e4SLinus Torvalds out: 7061da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7071da177e4SLinus Torvalds return i; 7081da177e4SLinus Torvalds } 709e8628dd0SDarrick J. Wong 710f4fd3797SLan Tianyu ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) 7111da177e4SLinus Torvalds { 7121da177e4SLinus Torvalds ssize_t i = 0; 7131da177e4SLinus Torvalds unsigned int cpu; 7141da177e4SLinus Torvalds 715835481d9SRusty Russell for_each_cpu(cpu, mask) { 7161da177e4SLinus Torvalds if (i) 7171da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 7181da177e4SLinus Torvalds i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 7191da177e4SLinus Torvalds if (i >= (PAGE_SIZE - 5)) 7201da177e4SLinus Torvalds break; 7211da177e4SLinus Torvalds } 7221da177e4SLinus Torvalds i += sprintf(&buf[i], "\n"); 7231da177e4SLinus Torvalds return i; 7241da177e4SLinus Torvalds } 725f4fd3797SLan Tianyu EXPORT_SYMBOL_GPL(cpufreq_show_cpus); 7261da177e4SLinus Torvalds 727e8628dd0SDarrick J. Wong /** 728e8628dd0SDarrick J. Wong * show_related_cpus - show the CPUs affected by each transition even if 729e8628dd0SDarrick J. Wong * hw coordination is in use 730e8628dd0SDarrick J. Wong */ 731e8628dd0SDarrick J. Wong static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 732e8628dd0SDarrick J. Wong { 733f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->related_cpus, buf); 734e8628dd0SDarrick J. Wong } 735e8628dd0SDarrick J. Wong 736e8628dd0SDarrick J. Wong /** 737e8628dd0SDarrick J. Wong * show_affected_cpus - show the CPUs affected by each transition 738e8628dd0SDarrick J. Wong */ 739e8628dd0SDarrick J. Wong static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) 740e8628dd0SDarrick J. Wong { 741f4fd3797SLan Tianyu return cpufreq_show_cpus(policy->cpus, buf); 742e8628dd0SDarrick J. Wong } 743e8628dd0SDarrick J. Wong 7449e76988eSVenki Pallipadi static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, 7459e76988eSVenki Pallipadi const char *buf, size_t count) 7469e76988eSVenki Pallipadi { 7479e76988eSVenki Pallipadi unsigned int freq = 0; 7489e76988eSVenki Pallipadi unsigned int ret; 7499e76988eSVenki Pallipadi 750879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->store_setspeed) 7519e76988eSVenki Pallipadi return -EINVAL; 7529e76988eSVenki Pallipadi 7539e76988eSVenki Pallipadi ret = sscanf(buf, "%u", &freq); 7549e76988eSVenki Pallipadi if (ret != 1) 7559e76988eSVenki Pallipadi return -EINVAL; 7569e76988eSVenki Pallipadi 7579e76988eSVenki Pallipadi policy->governor->store_setspeed(policy, freq); 7589e76988eSVenki Pallipadi 7599e76988eSVenki Pallipadi return count; 7609e76988eSVenki Pallipadi } 7619e76988eSVenki Pallipadi 7629e76988eSVenki Pallipadi static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) 7639e76988eSVenki Pallipadi { 764879000f9SCHIKAMA masaki if (!policy->governor || !policy->governor->show_setspeed) 7659e76988eSVenki Pallipadi return sprintf(buf, "<unsupported>\n"); 7669e76988eSVenki Pallipadi 7679e76988eSVenki Pallipadi return policy->governor->show_setspeed(policy, buf); 7689e76988eSVenki Pallipadi } 7691da177e4SLinus Torvalds 770e2f74f35SThomas Renninger /** 7718bf1ac72Sviresh kumar * show_bios_limit - show the current cpufreq HW/BIOS limitation 772e2f74f35SThomas Renninger */ 773e2f74f35SThomas Renninger static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) 774e2f74f35SThomas Renninger { 775e2f74f35SThomas Renninger unsigned int limit; 776e2f74f35SThomas Renninger int ret; 7771c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 7781c3d85ddSRafael J. Wysocki ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 779e2f74f35SThomas Renninger if (!ret) 780e2f74f35SThomas Renninger return sprintf(buf, "%u\n", limit); 781e2f74f35SThomas Renninger } 782e2f74f35SThomas Renninger return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 783e2f74f35SThomas Renninger } 784e2f74f35SThomas Renninger 7856dad2a29SBorislav Petkov cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); 7866dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_min_freq); 7876dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_max_freq); 7886dad2a29SBorislav Petkov cpufreq_freq_attr_ro(cpuinfo_transition_latency); 7896dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_available_governors); 7906dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_driver); 7916dad2a29SBorislav Petkov cpufreq_freq_attr_ro(scaling_cur_freq); 7926dad2a29SBorislav Petkov cpufreq_freq_attr_ro(bios_limit); 7936dad2a29SBorislav Petkov cpufreq_freq_attr_ro(related_cpus); 7946dad2a29SBorislav Petkov cpufreq_freq_attr_ro(affected_cpus); 7956dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_min_freq); 7966dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_max_freq); 7976dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_governor); 7986dad2a29SBorislav Petkov cpufreq_freq_attr_rw(scaling_setspeed); 7991da177e4SLinus Torvalds 8001da177e4SLinus Torvalds static struct attribute *default_attrs[] = { 8011da177e4SLinus Torvalds &cpuinfo_min_freq.attr, 8021da177e4SLinus Torvalds &cpuinfo_max_freq.attr, 803ed129784SThomas Renninger &cpuinfo_transition_latency.attr, 8041da177e4SLinus Torvalds &scaling_min_freq.attr, 8051da177e4SLinus Torvalds &scaling_max_freq.attr, 8061da177e4SLinus Torvalds &affected_cpus.attr, 807e8628dd0SDarrick J. Wong &related_cpus.attr, 8081da177e4SLinus Torvalds &scaling_governor.attr, 8091da177e4SLinus Torvalds &scaling_driver.attr, 8101da177e4SLinus Torvalds &scaling_available_governors.attr, 8119e76988eSVenki Pallipadi &scaling_setspeed.attr, 8121da177e4SLinus Torvalds NULL 8131da177e4SLinus Torvalds }; 8141da177e4SLinus Torvalds 8151da177e4SLinus Torvalds #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) 8161da177e4SLinus Torvalds #define to_attr(a) container_of(a, struct freq_attr, attr) 8171da177e4SLinus Torvalds 8181da177e4SLinus Torvalds static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 8191da177e4SLinus Torvalds { 8201da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8211da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 8221b750e3bSViresh Kumar ssize_t ret; 8236eed9404SViresh Kumar 824ad7722daSviresh kumar down_read(&policy->rwsem); 8255a01f2e8SVenkatesh Pallipadi 826e08f5f5bSGautham R Shenoy if (fattr->show) 827e08f5f5bSGautham R Shenoy ret = fattr->show(policy, buf); 828e08f5f5bSGautham R Shenoy else 829e08f5f5bSGautham R Shenoy ret = -EIO; 830e08f5f5bSGautham R Shenoy 831ad7722daSviresh kumar up_read(&policy->rwsem); 8321b750e3bSViresh Kumar 8331da177e4SLinus Torvalds return ret; 8341da177e4SLinus Torvalds } 8351da177e4SLinus Torvalds 8361da177e4SLinus Torvalds static ssize_t store(struct kobject *kobj, struct attribute *attr, 8371da177e4SLinus Torvalds const char *buf, size_t count) 8381da177e4SLinus Torvalds { 8391da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8401da177e4SLinus Torvalds struct freq_attr *fattr = to_attr(attr); 841a07530b4SDave Jones ssize_t ret = -EINVAL; 8426eed9404SViresh Kumar 8434f750c93SSrivatsa S. Bhat get_online_cpus(); 8444f750c93SSrivatsa S. Bhat 8454f750c93SSrivatsa S. Bhat if (!cpu_online(policy->cpu)) 8464f750c93SSrivatsa S. Bhat goto unlock; 8474f750c93SSrivatsa S. Bhat 848ad7722daSviresh kumar down_write(&policy->rwsem); 8495a01f2e8SVenkatesh Pallipadi 85011e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 85111e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) { 85211e584cfSViresh Kumar ret = -EBUSY; 85311e584cfSViresh Kumar goto unlock_policy_rwsem; 85411e584cfSViresh Kumar } 85511e584cfSViresh Kumar 856e08f5f5bSGautham R Shenoy if (fattr->store) 857e08f5f5bSGautham R Shenoy ret = fattr->store(policy, buf, count); 858e08f5f5bSGautham R Shenoy else 859e08f5f5bSGautham R Shenoy ret = -EIO; 860e08f5f5bSGautham R Shenoy 86111e584cfSViresh Kumar unlock_policy_rwsem: 862ad7722daSviresh kumar up_write(&policy->rwsem); 8634f750c93SSrivatsa S. Bhat unlock: 8644f750c93SSrivatsa S. Bhat put_online_cpus(); 8654f750c93SSrivatsa S. Bhat 8661da177e4SLinus Torvalds return ret; 8671da177e4SLinus Torvalds } 8681da177e4SLinus Torvalds 8691da177e4SLinus Torvalds static void cpufreq_sysfs_release(struct kobject *kobj) 8701da177e4SLinus Torvalds { 8711da177e4SLinus Torvalds struct cpufreq_policy *policy = to_policy(kobj); 8722d06d8c4SDominik Brodowski pr_debug("last reference is dropped\n"); 8731da177e4SLinus Torvalds complete(&policy->kobj_unregister); 8741da177e4SLinus Torvalds } 8751da177e4SLinus Torvalds 87652cf25d0SEmese Revfy static const struct sysfs_ops sysfs_ops = { 8771da177e4SLinus Torvalds .show = show, 8781da177e4SLinus Torvalds .store = store, 8791da177e4SLinus Torvalds }; 8801da177e4SLinus Torvalds 8811da177e4SLinus Torvalds static struct kobj_type ktype_cpufreq = { 8821da177e4SLinus Torvalds .sysfs_ops = &sysfs_ops, 8831da177e4SLinus Torvalds .default_attrs = default_attrs, 8841da177e4SLinus Torvalds .release = cpufreq_sysfs_release, 8851da177e4SLinus Torvalds }; 8861da177e4SLinus Torvalds 8872361be23SViresh Kumar struct kobject *cpufreq_global_kobject; 8882361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_global_kobject); 8892361be23SViresh Kumar 8902361be23SViresh Kumar static int cpufreq_global_kobject_usage; 8912361be23SViresh Kumar 8922361be23SViresh Kumar int cpufreq_get_global_kobject(void) 8932361be23SViresh Kumar { 8942361be23SViresh Kumar if (!cpufreq_global_kobject_usage++) 8952361be23SViresh Kumar return kobject_add(cpufreq_global_kobject, 8962361be23SViresh Kumar &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); 8972361be23SViresh Kumar 8982361be23SViresh Kumar return 0; 8992361be23SViresh Kumar } 9002361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_get_global_kobject); 9012361be23SViresh Kumar 9022361be23SViresh Kumar void cpufreq_put_global_kobject(void) 9032361be23SViresh Kumar { 9042361be23SViresh Kumar if (!--cpufreq_global_kobject_usage) 9052361be23SViresh Kumar kobject_del(cpufreq_global_kobject); 9062361be23SViresh Kumar } 9072361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_put_global_kobject); 9082361be23SViresh Kumar 9092361be23SViresh Kumar int cpufreq_sysfs_create_file(const struct attribute *attr) 9102361be23SViresh Kumar { 9112361be23SViresh Kumar int ret = cpufreq_get_global_kobject(); 9122361be23SViresh Kumar 9132361be23SViresh Kumar if (!ret) { 9142361be23SViresh Kumar ret = sysfs_create_file(cpufreq_global_kobject, attr); 9152361be23SViresh Kumar if (ret) 9162361be23SViresh Kumar cpufreq_put_global_kobject(); 9172361be23SViresh Kumar } 9182361be23SViresh Kumar 9192361be23SViresh Kumar return ret; 9202361be23SViresh Kumar } 9212361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_create_file); 9222361be23SViresh Kumar 9232361be23SViresh Kumar void cpufreq_sysfs_remove_file(const struct attribute *attr) 9242361be23SViresh Kumar { 9252361be23SViresh Kumar sysfs_remove_file(cpufreq_global_kobject, attr); 9262361be23SViresh Kumar cpufreq_put_global_kobject(); 9272361be23SViresh Kumar } 9282361be23SViresh Kumar EXPORT_SYMBOL(cpufreq_sysfs_remove_file); 9292361be23SViresh Kumar 93087549141SViresh Kumar static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) 93187549141SViresh Kumar { 93287549141SViresh Kumar struct device *cpu_dev; 93387549141SViresh Kumar 93487549141SViresh Kumar pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu); 93587549141SViresh Kumar 93687549141SViresh Kumar if (!policy) 93787549141SViresh Kumar return 0; 93887549141SViresh Kumar 93987549141SViresh Kumar cpu_dev = get_cpu_device(cpu); 94087549141SViresh Kumar if (WARN_ON(!cpu_dev)) 94187549141SViresh Kumar return 0; 94287549141SViresh Kumar 94387549141SViresh Kumar return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); 94487549141SViresh Kumar } 94587549141SViresh Kumar 94687549141SViresh Kumar static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) 94787549141SViresh Kumar { 94887549141SViresh Kumar struct device *cpu_dev; 94987549141SViresh Kumar 95087549141SViresh Kumar pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu); 95187549141SViresh Kumar 95287549141SViresh Kumar cpu_dev = get_cpu_device(cpu); 95387549141SViresh Kumar if (WARN_ON(!cpu_dev)) 95487549141SViresh Kumar return; 95587549141SViresh Kumar 95687549141SViresh Kumar sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 95787549141SViresh Kumar } 95887549141SViresh Kumar 95987549141SViresh Kumar /* Add/remove symlinks for all related CPUs */ 960308b60e7SViresh Kumar static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) 96119d6f7ecSDave Jones { 96219d6f7ecSDave Jones unsigned int j; 96319d6f7ecSDave Jones int ret = 0; 96419d6f7ecSDave Jones 96587549141SViresh Kumar /* Some related CPUs might not be present (physically hotplugged) */ 966559ed407SRafael J. Wysocki for_each_cpu(j, policy->real_cpus) { 9679d16f207SSaravana Kannan if (j == policy->kobj_cpu) 96819d6f7ecSDave Jones continue; 96919d6f7ecSDave Jones 97087549141SViresh Kumar ret = add_cpu_dev_symlink(policy, j); 97171c3461eSRafael J. Wysocki if (ret) 97271c3461eSRafael J. Wysocki break; 97319d6f7ecSDave Jones } 97487549141SViresh Kumar 97519d6f7ecSDave Jones return ret; 97619d6f7ecSDave Jones } 97719d6f7ecSDave Jones 97887549141SViresh Kumar static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) 97987549141SViresh Kumar { 98087549141SViresh Kumar unsigned int j; 98187549141SViresh Kumar 98287549141SViresh Kumar /* Some related CPUs might not be present (physically hotplugged) */ 983559ed407SRafael J. Wysocki for_each_cpu(j, policy->real_cpus) { 98487549141SViresh Kumar if (j == policy->kobj_cpu) 98587549141SViresh Kumar continue; 98687549141SViresh Kumar 98787549141SViresh Kumar remove_cpu_dev_symlink(policy, j); 98887549141SViresh Kumar } 98987549141SViresh Kumar } 99087549141SViresh Kumar 991d9612a49SRafael J. Wysocki static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) 992909a694eSDave Jones { 993909a694eSDave Jones struct freq_attr **drv_attr; 994909a694eSDave Jones int ret = 0; 995909a694eSDave Jones 996909a694eSDave Jones /* set up files for this cpu device */ 9971c3d85ddSRafael J. Wysocki drv_attr = cpufreq_driver->attr; 998f13f1184SViresh Kumar while (drv_attr && *drv_attr) { 999909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 1000909a694eSDave Jones if (ret) 10016d4e81edSTomeu Vizoso return ret; 1002909a694eSDave Jones drv_attr++; 1003909a694eSDave Jones } 10041c3d85ddSRafael J. Wysocki if (cpufreq_driver->get) { 1005909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 1006909a694eSDave Jones if (ret) 10076d4e81edSTomeu Vizoso return ret; 1008909a694eSDave Jones } 1009c034b02eSDirk Brandewie 1010909a694eSDave Jones ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 1011909a694eSDave Jones if (ret) 10126d4e81edSTomeu Vizoso return ret; 1013c034b02eSDirk Brandewie 10141c3d85ddSRafael J. Wysocki if (cpufreq_driver->bios_limit) { 1015e2f74f35SThomas Renninger ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 1016e2f74f35SThomas Renninger if (ret) 10176d4e81edSTomeu Vizoso return ret; 1018e2f74f35SThomas Renninger } 1019909a694eSDave Jones 10206d4e81edSTomeu Vizoso return cpufreq_add_dev_symlink(policy); 1021e18f1682SSrivatsa S. Bhat } 1022e18f1682SSrivatsa S. Bhat 10237f0fa40fSViresh Kumar static int cpufreq_init_policy(struct cpufreq_policy *policy) 1024e18f1682SSrivatsa S. Bhat { 10256e2c89d1Sviresh kumar struct cpufreq_governor *gov = NULL; 1026e18f1682SSrivatsa S. Bhat struct cpufreq_policy new_policy; 1027e18f1682SSrivatsa S. Bhat 1028d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 1029a27a9ab7SJason Baron 10306e2c89d1Sviresh kumar /* Update governor of new_policy to the governor used before hotplug */ 10314573237bSViresh Kumar gov = find_governor(policy->last_governor); 10326e2c89d1Sviresh kumar if (gov) 10336e2c89d1Sviresh kumar pr_debug("Restoring governor %s for cpu %d\n", 10346e2c89d1Sviresh kumar policy->governor->name, policy->cpu); 10356e2c89d1Sviresh kumar else 10366e2c89d1Sviresh kumar gov = CPUFREQ_DEFAULT_GOVERNOR; 10376e2c89d1Sviresh kumar 10386e2c89d1Sviresh kumar new_policy.governor = gov; 10396e2c89d1Sviresh kumar 1040a27a9ab7SJason Baron /* Use the default policy if its valid. */ 1041a27a9ab7SJason Baron if (cpufreq_driver->setpolicy) 10426e2c89d1Sviresh kumar cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 1043ecf7e461SDave Jones 1044ecf7e461SDave Jones /* set default policy */ 10457f0fa40fSViresh Kumar return cpufreq_set_policy(policy, &new_policy); 1046909a694eSDave Jones } 1047909a694eSDave Jones 1048d9612a49SRafael J. Wysocki static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) 1049fcf80582SViresh Kumar { 10509c0ebcf7SViresh Kumar int ret = 0; 1051fcf80582SViresh Kumar 1052bb29ae15SViresh Kumar /* Has this CPU been taken care of already? */ 1053bb29ae15SViresh Kumar if (cpumask_test_cpu(cpu, policy->cpus)) 1054bb29ae15SViresh Kumar return 0; 1055bb29ae15SViresh Kumar 10569c0ebcf7SViresh Kumar if (has_target()) { 10573de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 10583de9bdebSViresh Kumar if (ret) { 10593de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 10603de9bdebSViresh Kumar return ret; 10613de9bdebSViresh Kumar } 10623de9bdebSViresh Kumar } 1063fcf80582SViresh Kumar 1064ad7722daSviresh kumar down_write(&policy->rwsem); 1065fcf80582SViresh Kumar cpumask_set_cpu(cpu, policy->cpus); 1066ad7722daSviresh kumar up_write(&policy->rwsem); 10672eaa3e2dSViresh Kumar 10689c0ebcf7SViresh Kumar if (has_target()) { 1069e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1070e5c87b76SStratos Karafotis if (!ret) 1071e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1072e5c87b76SStratos Karafotis 1073e5c87b76SStratos Karafotis if (ret) { 10743de9bdebSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 10753de9bdebSViresh Kumar return ret; 10763de9bdebSViresh Kumar } 1077820c6ca2SViresh Kumar } 1078fcf80582SViresh Kumar 107987549141SViresh Kumar return 0; 1080fcf80582SViresh Kumar } 10811da177e4SLinus Torvalds 1082a34e63b1SRafael J. Wysocki static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) 1083e9698cc5SSrivatsa S. Bhat { 1084a34e63b1SRafael J. Wysocki struct device *dev = get_cpu_device(cpu); 1085e9698cc5SSrivatsa S. Bhat struct cpufreq_policy *policy; 10862fc3384dSViresh Kumar int ret; 1087e9698cc5SSrivatsa S. Bhat 1088a34e63b1SRafael J. Wysocki if (WARN_ON(!dev)) 1089a34e63b1SRafael J. Wysocki return NULL; 1090a34e63b1SRafael J. Wysocki 1091e9698cc5SSrivatsa S. Bhat policy = kzalloc(sizeof(*policy), GFP_KERNEL); 1092e9698cc5SSrivatsa S. Bhat if (!policy) 1093e9698cc5SSrivatsa S. Bhat return NULL; 1094e9698cc5SSrivatsa S. Bhat 1095e9698cc5SSrivatsa S. Bhat if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) 1096e9698cc5SSrivatsa S. Bhat goto err_free_policy; 1097e9698cc5SSrivatsa S. Bhat 1098e9698cc5SSrivatsa S. Bhat if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1099e9698cc5SSrivatsa S. Bhat goto err_free_cpumask; 1100e9698cc5SSrivatsa S. Bhat 1101559ed407SRafael J. Wysocki if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) 1102559ed407SRafael J. Wysocki goto err_free_rcpumask; 1103559ed407SRafael J. Wysocki 11042fc3384dSViresh Kumar ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 11052fc3384dSViresh Kumar "cpufreq"); 11062fc3384dSViresh Kumar if (ret) { 11072fc3384dSViresh Kumar pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1108559ed407SRafael J. Wysocki goto err_free_real_cpus; 11092fc3384dSViresh Kumar } 11102fc3384dSViresh Kumar 1111c88a1f8bSLukasz Majewski INIT_LIST_HEAD(&policy->policy_list); 1112ad7722daSviresh kumar init_rwsem(&policy->rwsem); 111312478cf0SSrivatsa S. Bhat spin_lock_init(&policy->transition_lock); 111412478cf0SSrivatsa S. Bhat init_waitqueue_head(&policy->transition_wait); 1115818c5712SViresh Kumar init_completion(&policy->kobj_unregister); 1116818c5712SViresh Kumar INIT_WORK(&policy->update, handle_update); 1117ad7722daSviresh kumar 1118a34e63b1SRafael J. Wysocki policy->cpu = cpu; 111987549141SViresh Kumar 112087549141SViresh Kumar /* Set this once on allocation */ 1121a34e63b1SRafael J. Wysocki policy->kobj_cpu = cpu; 112287549141SViresh Kumar 1123e9698cc5SSrivatsa S. Bhat return policy; 1124e9698cc5SSrivatsa S. Bhat 1125559ed407SRafael J. Wysocki err_free_real_cpus: 1126559ed407SRafael J. Wysocki free_cpumask_var(policy->real_cpus); 11272fc3384dSViresh Kumar err_free_rcpumask: 11282fc3384dSViresh Kumar free_cpumask_var(policy->related_cpus); 1129e9698cc5SSrivatsa S. Bhat err_free_cpumask: 1130e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1131e9698cc5SSrivatsa S. Bhat err_free_policy: 1132e9698cc5SSrivatsa S. Bhat kfree(policy); 1133e9698cc5SSrivatsa S. Bhat 1134e9698cc5SSrivatsa S. Bhat return NULL; 1135e9698cc5SSrivatsa S. Bhat } 1136e9698cc5SSrivatsa S. Bhat 11372fc3384dSViresh Kumar static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) 113842f921a6SViresh Kumar { 113942f921a6SViresh Kumar struct kobject *kobj; 114042f921a6SViresh Kumar struct completion *cmp; 114142f921a6SViresh Kumar 11422fc3384dSViresh Kumar if (notify) 1143fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1144fcd7af91SViresh Kumar CPUFREQ_REMOVE_POLICY, policy); 1145fcd7af91SViresh Kumar 114687549141SViresh Kumar down_write(&policy->rwsem); 114787549141SViresh Kumar cpufreq_remove_dev_symlink(policy); 114842f921a6SViresh Kumar kobj = &policy->kobj; 114942f921a6SViresh Kumar cmp = &policy->kobj_unregister; 115087549141SViresh Kumar up_write(&policy->rwsem); 115142f921a6SViresh Kumar kobject_put(kobj); 115242f921a6SViresh Kumar 115342f921a6SViresh Kumar /* 115442f921a6SViresh Kumar * We need to make sure that the underlying kobj is 115542f921a6SViresh Kumar * actually not referenced anymore by anybody before we 115642f921a6SViresh Kumar * proceed with unloading. 115742f921a6SViresh Kumar */ 115842f921a6SViresh Kumar pr_debug("waiting for dropping of refcount\n"); 115942f921a6SViresh Kumar wait_for_completion(cmp); 116042f921a6SViresh Kumar pr_debug("wait complete\n"); 116142f921a6SViresh Kumar } 116242f921a6SViresh Kumar 11633654c5ccSViresh Kumar static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) 1164e9698cc5SSrivatsa S. Bhat { 1165988bed09SViresh Kumar unsigned long flags; 1166988bed09SViresh Kumar int cpu; 1167988bed09SViresh Kumar 1168988bed09SViresh Kumar /* Remove policy from list */ 1169988bed09SViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1170988bed09SViresh Kumar list_del(&policy->policy_list); 1171988bed09SViresh Kumar 1172988bed09SViresh Kumar for_each_cpu(cpu, policy->related_cpus) 1173988bed09SViresh Kumar per_cpu(cpufreq_cpu_data, cpu) = NULL; 1174988bed09SViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1175988bed09SViresh Kumar 11763654c5ccSViresh Kumar cpufreq_policy_put_kobj(policy, notify); 1177559ed407SRafael J. Wysocki free_cpumask_var(policy->real_cpus); 1178e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->related_cpus); 1179e9698cc5SSrivatsa S. Bhat free_cpumask_var(policy->cpus); 1180e9698cc5SSrivatsa S. Bhat kfree(policy); 1181e9698cc5SSrivatsa S. Bhat } 1182e9698cc5SSrivatsa S. Bhat 11830b275352SRafael J. Wysocki static int cpufreq_online(unsigned int cpu) 11841da177e4SLinus Torvalds { 11857f0c020aSViresh Kumar struct cpufreq_policy *policy; 1186194d99c7SRafael J. Wysocki bool new_policy; 11870b275352SRafael J. Wysocki unsigned long flags; 11880b275352SRafael J. Wysocki unsigned int j; 11890b275352SRafael J. Wysocki int ret; 1190c32b6b8eSAshok Raj 11910b275352SRafael J. Wysocki pr_debug("%s: bringing CPU%u online\n", __func__, cpu); 119287549141SViresh Kumar 1193bb29ae15SViresh Kumar /* Check if this CPU already has a policy to manage it */ 11949104bb26SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 119511ce707eSRafael J. Wysocki if (policy) { 11969104bb26SViresh Kumar WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 119711ce707eSRafael J. Wysocki if (!policy_is_inactive(policy)) 1198d9612a49SRafael J. Wysocki return cpufreq_add_policy_cpu(policy, cpu); 11991da177e4SLinus Torvalds 120011ce707eSRafael J. Wysocki /* This is the only online CPU for the policy. Start over. */ 1201194d99c7SRafael J. Wysocki new_policy = false; 120211ce707eSRafael J. Wysocki down_write(&policy->rwsem); 120311ce707eSRafael J. Wysocki policy->cpu = cpu; 120411ce707eSRafael J. Wysocki policy->governor = NULL; 120511ce707eSRafael J. Wysocki up_write(&policy->rwsem); 120611ce707eSRafael J. Wysocki } else { 1207194d99c7SRafael J. Wysocki new_policy = true; 1208a34e63b1SRafael J. Wysocki policy = cpufreq_policy_alloc(cpu); 1209059019a3SDave Jones if (!policy) 1210d4d854d6SRafael J. Wysocki return -ENOMEM; 121172368d12SRafael J. Wysocki } 12120d66b91eSSrivatsa S. Bhat 1213835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 12141da177e4SLinus Torvalds 12151da177e4SLinus Torvalds /* call driver. From then on the cpufreq must be able 12161da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 12171da177e4SLinus Torvalds */ 12181c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy); 12191da177e4SLinus Torvalds if (ret) { 12202d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 12218101f997SViresh Kumar goto out_free_policy; 12221da177e4SLinus Torvalds } 1223643ae6e8SViresh Kumar 12246d4e81edSTomeu Vizoso down_write(&policy->rwsem); 12256d4e81edSTomeu Vizoso 1226194d99c7SRafael J. Wysocki if (new_policy) { 12274d1f3a5bSRafael J. Wysocki /* related_cpus should at least include policy->cpus. */ 12285a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 12294d1f3a5bSRafael J. Wysocki /* Remember CPUs present at the policy creation time. */ 1230559ed407SRafael J. Wysocki cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); 12314d1f3a5bSRafael J. Wysocki } 1232559ed407SRafael J. Wysocki 12335a7e56a5SViresh Kumar /* 12345a7e56a5SViresh Kumar * affected cpus must always be the one, which are online. We aren't 12355a7e56a5SViresh Kumar * managing offline cpus here. 12365a7e56a5SViresh Kumar */ 12375a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 12385a7e56a5SViresh Kumar 1239194d99c7SRafael J. Wysocki if (new_policy) { 12405a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 12415a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 12426d4e81edSTomeu Vizoso 1243652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1244988bed09SViresh Kumar for_each_cpu(j, policy->related_cpus) 1245652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1246652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1247988bed09SViresh Kumar } 1248652ed95dSViresh Kumar 12492ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1250da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1251da60ce9fSViresh Kumar if (!policy->cur) { 1252da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 12538101f997SViresh Kumar goto out_exit_policy; 1254da60ce9fSViresh Kumar } 1255da60ce9fSViresh Kumar } 1256da60ce9fSViresh Kumar 1257d3916691SViresh Kumar /* 1258d3916691SViresh Kumar * Sometimes boot loaders set CPU frequency to a value outside of 1259d3916691SViresh Kumar * frequency table present with cpufreq core. In such cases CPU might be 1260d3916691SViresh Kumar * unstable if it has to run on that frequency for long duration of time 1261d3916691SViresh Kumar * and so its better to set it to a frequency which is specified in 1262d3916691SViresh Kumar * freq-table. This also makes cpufreq stats inconsistent as 1263d3916691SViresh Kumar * cpufreq-stats would fail to register because current frequency of CPU 1264d3916691SViresh Kumar * isn't found in freq-table. 1265d3916691SViresh Kumar * 1266d3916691SViresh Kumar * Because we don't want this change to effect boot process badly, we go 1267d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1268d3916691SViresh Kumar * otherwise we will end up setting freq to lowest of the table as 'cur' 1269d3916691SViresh Kumar * is initialized to zero). 1270d3916691SViresh Kumar * 1271d3916691SViresh Kumar * We are passing target-freq as "policy->cur - 1" otherwise 1272d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur will be 1273d3916691SViresh Kumar * equal to target-freq. 1274d3916691SViresh Kumar */ 1275d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1276d3916691SViresh Kumar && has_target()) { 1277d3916691SViresh Kumar /* Are we running at unknown frequency ? */ 1278d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1279d3916691SViresh Kumar if (ret == -EINVAL) { 1280d3916691SViresh Kumar /* Warn user and fix it */ 1281d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n", 1282d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1283d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1284d3916691SViresh Kumar CPUFREQ_RELATION_L); 1285d3916691SViresh Kumar 1286d3916691SViresh Kumar /* 1287d3916691SViresh Kumar * Reaching here after boot in a few seconds may not 1288d3916691SViresh Kumar * mean that system will remain stable at "unknown" 1289d3916691SViresh Kumar * frequency for longer duration. Hence, a BUG_ON(). 1290d3916691SViresh Kumar */ 1291d3916691SViresh Kumar BUG_ON(ret); 1292d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n", 1293d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1294d3916691SViresh Kumar } 1295d3916691SViresh Kumar } 1296d3916691SViresh Kumar 1297a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1298a1531acdSThomas Renninger CPUFREQ_START, policy); 1299a1531acdSThomas Renninger 1300194d99c7SRafael J. Wysocki if (new_policy) { 1301d9612a49SRafael J. Wysocki ret = cpufreq_add_dev_interface(policy); 130219d6f7ecSDave Jones if (ret) 13038101f997SViresh Kumar goto out_exit_policy; 1304fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1305fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 1306c88a1f8bSLukasz Majewski 1307c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1308c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1309c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1310988bed09SViresh Kumar } 13118ff69732SDave Jones 13127f0fa40fSViresh Kumar ret = cpufreq_init_policy(policy); 13137f0fa40fSViresh Kumar if (ret) { 13147f0fa40fSViresh Kumar pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", 13157f0fa40fSViresh Kumar __func__, cpu, ret); 1316194d99c7SRafael J. Wysocki /* cpufreq_policy_free() will notify based on this */ 1317194d99c7SRafael J. Wysocki new_policy = false; 1318194d99c7SRafael J. Wysocki goto out_exit_policy; 13197f0fa40fSViresh Kumar } 1320e18f1682SSrivatsa S. Bhat 13214e97b631SViresh Kumar up_write(&policy->rwsem); 132208fd8c1cSViresh Kumar 1323038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 13247c45cf31SViresh Kumar 13257c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 13267c45cf31SViresh Kumar if (cpufreq_driver->ready) 13277c45cf31SViresh Kumar cpufreq_driver->ready(policy); 13287c45cf31SViresh Kumar 13292d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 13301da177e4SLinus Torvalds 13311da177e4SLinus Torvalds return 0; 13321da177e4SLinus Torvalds 13338101f997SViresh Kumar out_exit_policy: 13347106e02bSPrarit Bhargava up_write(&policy->rwsem); 13357106e02bSPrarit Bhargava 1336da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1337da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 13388101f997SViresh Kumar out_free_policy: 1339194d99c7SRafael J. Wysocki cpufreq_policy_free(policy, !new_policy); 13401da177e4SLinus Torvalds return ret; 13411da177e4SLinus Torvalds } 13421da177e4SLinus Torvalds 13430b275352SRafael J. Wysocki /** 13440b275352SRafael J. Wysocki * cpufreq_add_dev - the cpufreq interface for a CPU device. 13450b275352SRafael J. Wysocki * @dev: CPU device. 13460b275352SRafael J. Wysocki * @sif: Subsystem interface structure pointer (not used) 13470b275352SRafael J. Wysocki */ 13480b275352SRafael J. Wysocki static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 13490b275352SRafael J. Wysocki { 13500b275352SRafael J. Wysocki unsigned cpu = dev->id; 13510b275352SRafael J. Wysocki int ret; 13520b275352SRafael J. Wysocki 13530b275352SRafael J. Wysocki dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); 13540b275352SRafael J. Wysocki 13550b275352SRafael J. Wysocki if (cpu_online(cpu)) { 13560b275352SRafael J. Wysocki ret = cpufreq_online(cpu); 13570b275352SRafael J. Wysocki } else { 13580b275352SRafael J. Wysocki /* 13590b275352SRafael J. Wysocki * A hotplug notifier will follow and we will handle it as CPU 13600b275352SRafael J. Wysocki * online then. For now, just create the sysfs link, unless 13610b275352SRafael J. Wysocki * there is no policy or the link is already present. 13620b275352SRafael J. Wysocki */ 13630b275352SRafael J. Wysocki struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 13640b275352SRafael J. Wysocki 13650b275352SRafael J. Wysocki ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) 13660b275352SRafael J. Wysocki ? add_cpu_dev_symlink(policy, cpu) : 0; 13670b275352SRafael J. Wysocki } 13680b275352SRafael J. Wysocki 13690b275352SRafael J. Wysocki return ret; 13700b275352SRafael J. Wysocki } 13710b275352SRafael J. Wysocki 137215c0b4d2SRafael J. Wysocki static void cpufreq_offline_prepare(unsigned int cpu) 13731da177e4SLinus Torvalds { 13743a3e9e06SViresh Kumar struct cpufreq_policy *policy; 13751da177e4SLinus Torvalds 1376b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 13771da177e4SLinus Torvalds 1378988bed09SViresh Kumar policy = cpufreq_cpu_get_raw(cpu); 13793a3e9e06SViresh Kumar if (!policy) { 1380b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 138115c0b4d2SRafael J. Wysocki return; 13821da177e4SLinus Torvalds } 13831da177e4SLinus Torvalds 13849c0ebcf7SViresh Kumar if (has_target()) { 138515c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1386559ed407SRafael J. Wysocki if (ret) 13873de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 1388db5f2995SViresh Kumar } 13891da177e4SLinus Torvalds 13904573237bSViresh Kumar down_write(&policy->rwsem); 13919591becbSViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 13924573237bSViresh Kumar 13939591becbSViresh Kumar if (policy_is_inactive(policy)) { 13949591becbSViresh Kumar if (has_target()) 13954573237bSViresh Kumar strncpy(policy->last_governor, policy->governor->name, 13964573237bSViresh Kumar CPUFREQ_NAME_LEN); 13979591becbSViresh Kumar } else if (cpu == policy->cpu) { 13989591becbSViresh Kumar /* Nominate new CPU */ 13999591becbSViresh Kumar policy->cpu = cpumask_any(policy->cpus); 14009591becbSViresh Kumar } 14014573237bSViresh Kumar up_write(&policy->rwsem); 14021da177e4SLinus Torvalds 14039591becbSViresh Kumar /* Start governor again for active policy */ 14049591becbSViresh Kumar if (!policy_is_inactive(policy)) { 14059591becbSViresh Kumar if (has_target()) { 140615c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 14079591becbSViresh Kumar if (!ret) 14089591becbSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 140987549141SViresh Kumar 14109591becbSViresh Kumar if (ret) 14119591becbSViresh Kumar pr_err("%s: Failed to start governor\n", __func__); 14129591becbSViresh Kumar } 14139591becbSViresh Kumar } else if (cpufreq_driver->stop_cpu) { 1414367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 14159591becbSViresh Kumar } 1416cedb70afSSrivatsa S. Bhat } 1417cedb70afSSrivatsa S. Bhat 141815c0b4d2SRafael J. Wysocki static void cpufreq_offline_finish(unsigned int cpu) 1419cedb70afSSrivatsa S. Bhat { 14209591becbSViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1421cedb70afSSrivatsa S. Bhat 1422cedb70afSSrivatsa S. Bhat if (!policy) { 1423cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 142415c0b4d2SRafael J. Wysocki return; 1425cedb70afSSrivatsa S. Bhat } 1426cedb70afSSrivatsa S. Bhat 14279591becbSViresh Kumar /* Only proceed for inactive policies */ 14289591becbSViresh Kumar if (!policy_is_inactive(policy)) 142915c0b4d2SRafael J. Wysocki return; 143087549141SViresh Kumar 143187549141SViresh Kumar /* If cpu is last user of policy, free policy */ 143287549141SViresh Kumar if (has_target()) { 143315c0b4d2SRafael J. Wysocki int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1434559ed407SRafael J. Wysocki if (ret) 143587549141SViresh Kumar pr_err("%s: Failed to exit governor\n", __func__); 14363de9bdebSViresh Kumar } 14372a998599SRafael J. Wysocki 14388414809cSSrivatsa S. Bhat /* 14398414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 14408414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 14418414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 14428414809cSSrivatsa S. Bhat */ 14431c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 14443a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 14451da177e4SLinus Torvalds } 14461da177e4SLinus Torvalds 1447cedb70afSSrivatsa S. Bhat /** 144827a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1449cedb70afSSrivatsa S. Bhat * 1450cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1451cedb70afSSrivatsa S. Bhat */ 14528a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 14535a01f2e8SVenkatesh Pallipadi { 14548a25a2fdSKay Sievers unsigned int cpu = dev->id; 145587549141SViresh Kumar struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 145687549141SViresh Kumar 145787549141SViresh Kumar if (!policy) 1458ec28297aSVenki Pallipadi return 0; 1459ec28297aSVenki Pallipadi 1460559ed407SRafael J. Wysocki if (cpu_online(cpu)) { 146115c0b4d2SRafael J. Wysocki cpufreq_offline_prepare(cpu); 146215c0b4d2SRafael J. Wysocki cpufreq_offline_finish(cpu); 146387549141SViresh Kumar } 146487549141SViresh Kumar 1465559ed407SRafael J. Wysocki cpumask_clear_cpu(cpu, policy->real_cpus); 1466559ed407SRafael J. Wysocki 1467559ed407SRafael J. Wysocki if (cpumask_empty(policy->real_cpus)) { 14683654c5ccSViresh Kumar cpufreq_policy_free(policy, true); 146987549141SViresh Kumar return 0; 147087549141SViresh Kumar } 147187549141SViresh Kumar 1472559ed407SRafael J. Wysocki if (cpu != policy->kobj_cpu) { 1473559ed407SRafael J. Wysocki remove_cpu_dev_symlink(policy, cpu); 1474559ed407SRafael J. Wysocki } else { 1475559ed407SRafael J. Wysocki /* 1476559ed407SRafael J. Wysocki * The CPU owning the policy object is going away. Move it to 1477559ed407SRafael J. Wysocki * another suitable CPU. 1478559ed407SRafael J. Wysocki */ 1479559ed407SRafael J. Wysocki unsigned int new_cpu = cpumask_first(policy->real_cpus); 1480559ed407SRafael J. Wysocki struct device *new_dev = get_cpu_device(new_cpu); 148127a862e9SViresh Kumar 1482559ed407SRafael J. Wysocki dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu); 148327a862e9SViresh Kumar 1484559ed407SRafael J. Wysocki sysfs_remove_link(&new_dev->kobj, "cpufreq"); 1485559ed407SRafael J. Wysocki policy->kobj_cpu = new_cpu; 1486559ed407SRafael J. Wysocki WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); 1487559ed407SRafael J. Wysocki } 1488559ed407SRafael J. Wysocki 1489559ed407SRafael J. Wysocki return 0; 14905a01f2e8SVenkatesh Pallipadi } 14915a01f2e8SVenkatesh Pallipadi 149265f27f38SDavid Howells static void handle_update(struct work_struct *work) 14931da177e4SLinus Torvalds { 149465f27f38SDavid Howells struct cpufreq_policy *policy = 149565f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 149665f27f38SDavid Howells unsigned int cpu = policy->cpu; 14972d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 14981da177e4SLinus Torvalds cpufreq_update_policy(cpu); 14991da177e4SLinus Torvalds } 15001da177e4SLinus Torvalds 15011da177e4SLinus Torvalds /** 1502bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1503bb176f7dSViresh Kumar * in deep trouble. 1504a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15051da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15061da177e4SLinus Torvalds * 150729464f28SDave Jones * We adjust to current frequency first, and need to clean up later. 150829464f28SDave Jones * So either call to cpufreq_update_policy() or schedule handle_update()). 15091da177e4SLinus Torvalds */ 1510a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1511e08f5f5bSGautham R Shenoy unsigned int new_freq) 15121da177e4SLinus Torvalds { 15131da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1514b43a7ffbSViresh Kumar 1515e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1516a1e1dc41SViresh Kumar policy->cur, new_freq); 15171da177e4SLinus Torvalds 1518a1e1dc41SViresh Kumar freqs.old = policy->cur; 15191da177e4SLinus Torvalds freqs.new = new_freq; 1520b43a7ffbSViresh Kumar 15218fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 15228fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 15231da177e4SLinus Torvalds } 15241da177e4SLinus Torvalds 15251da177e4SLinus Torvalds /** 15264ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 152795235ca2SVenkatesh Pallipadi * @cpu: CPU number 152895235ca2SVenkatesh Pallipadi * 152995235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 153095235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 153195235ca2SVenkatesh Pallipadi */ 153295235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 153395235ca2SVenkatesh Pallipadi { 15349e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1535e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 153695235ca2SVenkatesh Pallipadi 15371c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 15381c3d85ddSRafael J. Wysocki return cpufreq_driver->get(cpu); 15399e21ba8bSDirk Brandewie 15409e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 154195235ca2SVenkatesh Pallipadi if (policy) { 1542e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 154395235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 154495235ca2SVenkatesh Pallipadi } 154595235ca2SVenkatesh Pallipadi 15464d34a67dSDave Jones return ret_freq; 154795235ca2SVenkatesh Pallipadi } 154895235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 154995235ca2SVenkatesh Pallipadi 15503d737108SJesse Barnes /** 15513d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 15523d737108SJesse Barnes * @cpu: CPU number 15533d737108SJesse Barnes * 15543d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 15553d737108SJesse Barnes */ 15563d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 15573d737108SJesse Barnes { 15583d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15593d737108SJesse Barnes unsigned int ret_freq = 0; 15603d737108SJesse Barnes 15613d737108SJesse Barnes if (policy) { 15623d737108SJesse Barnes ret_freq = policy->max; 15633d737108SJesse Barnes cpufreq_cpu_put(policy); 15643d737108SJesse Barnes } 15653d737108SJesse Barnes 15663d737108SJesse Barnes return ret_freq; 15673d737108SJesse Barnes } 15683d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 15693d737108SJesse Barnes 1570d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 15711da177e4SLinus Torvalds { 1572e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 15731da177e4SLinus Torvalds 15741c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 15754d34a67dSDave Jones return ret_freq; 15761da177e4SLinus Torvalds 1577d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 15781da177e4SLinus Torvalds 157911e584cfSViresh Kumar /* Updating inactive policies is invalid, so avoid doing that. */ 158011e584cfSViresh Kumar if (unlikely(policy_is_inactive(policy))) 158111e584cfSViresh Kumar return ret_freq; 158211e584cfSViresh Kumar 1583e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 15841c3d85ddSRafael J. Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1585e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1586e08f5f5bSGautham R Shenoy saved value exists */ 1587e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1588a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 15891da177e4SLinus Torvalds schedule_work(&policy->update); 15901da177e4SLinus Torvalds } 15911da177e4SLinus Torvalds } 15921da177e4SLinus Torvalds 15934d34a67dSDave Jones return ret_freq; 15945a01f2e8SVenkatesh Pallipadi } 15951da177e4SLinus Torvalds 15965a01f2e8SVenkatesh Pallipadi /** 15975a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 15985a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 15995a01f2e8SVenkatesh Pallipadi * 16005a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16015a01f2e8SVenkatesh Pallipadi */ 16025a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16035a01f2e8SVenkatesh Pallipadi { 1604999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16055a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16065a01f2e8SVenkatesh Pallipadi 1607999976e0SAaron Plattner if (policy) { 1608ad7722daSviresh kumar down_read(&policy->rwsem); 1609d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1610ad7722daSviresh kumar up_read(&policy->rwsem); 1611999976e0SAaron Plattner 1612999976e0SAaron Plattner cpufreq_cpu_put(policy); 1613999976e0SAaron Plattner } 16146eed9404SViresh Kumar 16154d34a67dSDave Jones return ret_freq; 16161da177e4SLinus Torvalds } 16171da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 16181da177e4SLinus Torvalds 16198a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 16208a25a2fdSKay Sievers .name = "cpufreq", 16218a25a2fdSKay Sievers .subsys = &cpu_subsys, 16228a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 16238a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1624e00e56dfSRafael J. Wysocki }; 1625e00e56dfSRafael J. Wysocki 1626e28867eaSViresh Kumar /* 1627e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1628e28867eaSViresh Kumar * during suspend.. 162942d4dc3fSBenjamin Herrenschmidt */ 1630e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 163142d4dc3fSBenjamin Herrenschmidt { 1632e28867eaSViresh Kumar int ret; 16334bc5d341SDave Jones 1634e28867eaSViresh Kumar if (!policy->suspend_freq) { 1635e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1636e28867eaSViresh Kumar return -EINVAL; 163742d4dc3fSBenjamin Herrenschmidt } 163842d4dc3fSBenjamin Herrenschmidt 1639e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1640e28867eaSViresh Kumar policy->suspend_freq); 1641e28867eaSViresh Kumar 1642e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1643e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1644e28867eaSViresh Kumar if (ret) 1645e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. err: %d\n", 1646e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1647e28867eaSViresh Kumar 1648c9060494SDave Jones return ret; 164942d4dc3fSBenjamin Herrenschmidt } 1650e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 165142d4dc3fSBenjamin Herrenschmidt 165242d4dc3fSBenjamin Herrenschmidt /** 16532f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 16541da177e4SLinus Torvalds * 16552f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 16562f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 16572f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 16582f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 16591da177e4SLinus Torvalds */ 16602f0aea93SViresh Kumar void cpufreq_suspend(void) 16611da177e4SLinus Torvalds { 16623a3e9e06SViresh Kumar struct cpufreq_policy *policy; 16631da177e4SLinus Torvalds 16642f0aea93SViresh Kumar if (!cpufreq_driver) 1665e00e56dfSRafael J. Wysocki return; 16661da177e4SLinus Torvalds 16672f0aea93SViresh Kumar if (!has_target()) 1668b1b12babSViresh Kumar goto suspend; 16691da177e4SLinus Torvalds 16702f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 16712f0aea93SViresh Kumar 1672f963735aSViresh Kumar for_each_active_policy(policy) { 16732f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 16742f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 16752f0aea93SViresh Kumar __func__, policy); 16762f0aea93SViresh Kumar else if (cpufreq_driver->suspend 16772f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 16782f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 16792f0aea93SViresh Kumar policy); 16801da177e4SLinus Torvalds } 1681b1b12babSViresh Kumar 1682b1b12babSViresh Kumar suspend: 1683b1b12babSViresh Kumar cpufreq_suspended = true; 16841da177e4SLinus Torvalds } 16851da177e4SLinus Torvalds 16861da177e4SLinus Torvalds /** 16872f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 16881da177e4SLinus Torvalds * 16892f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 16902f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 16911da177e4SLinus Torvalds */ 16922f0aea93SViresh Kumar void cpufreq_resume(void) 16931da177e4SLinus Torvalds { 16941da177e4SLinus Torvalds struct cpufreq_policy *policy; 16951da177e4SLinus Torvalds 16962f0aea93SViresh Kumar if (!cpufreq_driver) 16971da177e4SLinus Torvalds return; 16981da177e4SLinus Torvalds 16998e30444eSLan Tianyu cpufreq_suspended = false; 17008e30444eSLan Tianyu 17012f0aea93SViresh Kumar if (!has_target()) 17022f0aea93SViresh Kumar return; 17031da177e4SLinus Torvalds 17042f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17052f0aea93SViresh Kumar 1706f963735aSViresh Kumar for_each_active_policy(policy) { 17070c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17080c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 17090c5aa405SViresh Kumar policy); 17100c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 17112f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 17122f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 17132f0aea93SViresh Kumar __func__, policy); 1714c75de0acSViresh Kumar } 17152f0aea93SViresh Kumar 17162f0aea93SViresh Kumar /* 1717c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1718c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1719c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 17202f0aea93SViresh Kumar */ 1721c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1722c75de0acSViresh Kumar if (WARN_ON(!policy)) 1723c75de0acSViresh Kumar return; 1724c75de0acSViresh Kumar 17253a3e9e06SViresh Kumar schedule_work(&policy->update); 17261da177e4SLinus Torvalds } 17271da177e4SLinus Torvalds 17289d95046eSBorislav Petkov /** 17299d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 17309d95046eSBorislav Petkov * 17319d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 17329d95046eSBorislav Petkov * or NULL, if none. 17339d95046eSBorislav Petkov */ 17349d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 17359d95046eSBorislav Petkov { 17361c3d85ddSRafael J. Wysocki if (cpufreq_driver) 17371c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 17381c3d85ddSRafael J. Wysocki 17391c3d85ddSRafael J. Wysocki return NULL; 17409d95046eSBorislav Petkov } 17419d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 17421da177e4SLinus Torvalds 174351315cdfSThomas Petazzoni /** 174451315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 174551315cdfSThomas Petazzoni * 174651315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 174751315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 174851315cdfSThomas Petazzoni */ 174951315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 175051315cdfSThomas Petazzoni { 175151315cdfSThomas Petazzoni if (cpufreq_driver) 175251315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 175351315cdfSThomas Petazzoni 175451315cdfSThomas Petazzoni return NULL; 175551315cdfSThomas Petazzoni } 175651315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 175751315cdfSThomas Petazzoni 17581da177e4SLinus Torvalds /********************************************************************* 17591da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 17601da177e4SLinus Torvalds *********************************************************************/ 17611da177e4SLinus Torvalds 17621da177e4SLinus Torvalds /** 17631da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 17641da177e4SLinus Torvalds * @nb: notifier function to register 17651da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 17661da177e4SLinus Torvalds * 17671da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 17681da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 17691da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 17701da177e4SLinus Torvalds * changes in cpufreq policy. 17711da177e4SLinus Torvalds * 17721da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1773e041c683SAlan Stern * blocking_notifier_chain_register. 17741da177e4SLinus Torvalds */ 17751da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 17761da177e4SLinus Torvalds { 17771da177e4SLinus Torvalds int ret; 17781da177e4SLinus Torvalds 1779d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1780d5aaffa9SDirk Brandewie return -EINVAL; 1781d5aaffa9SDirk Brandewie 178274212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 178374212ca4SCesar Eduardo Barros 17841da177e4SLinus Torvalds switch (list) { 17851da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1786b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1787e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 17881da177e4SLinus Torvalds break; 17891da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1790e041c683SAlan Stern ret = blocking_notifier_chain_register( 1791e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 17921da177e4SLinus Torvalds break; 17931da177e4SLinus Torvalds default: 17941da177e4SLinus Torvalds ret = -EINVAL; 17951da177e4SLinus Torvalds } 17961da177e4SLinus Torvalds 17971da177e4SLinus Torvalds return ret; 17981da177e4SLinus Torvalds } 17991da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18001da177e4SLinus Torvalds 18011da177e4SLinus Torvalds /** 18021da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18031da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18041da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18051da177e4SLinus Torvalds * 18061da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18071da177e4SLinus Torvalds * 18081da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1809e041c683SAlan Stern * blocking_notifier_chain_unregister. 18101da177e4SLinus Torvalds */ 18111da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 18121da177e4SLinus Torvalds { 18131da177e4SLinus Torvalds int ret; 18141da177e4SLinus Torvalds 1815d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1816d5aaffa9SDirk Brandewie return -EINVAL; 1817d5aaffa9SDirk Brandewie 18181da177e4SLinus Torvalds switch (list) { 18191da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1820b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1821e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18221da177e4SLinus Torvalds break; 18231da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1824e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1825e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18261da177e4SLinus Torvalds break; 18271da177e4SLinus Torvalds default: 18281da177e4SLinus Torvalds ret = -EINVAL; 18291da177e4SLinus Torvalds } 18301da177e4SLinus Torvalds 18311da177e4SLinus Torvalds return ret; 18321da177e4SLinus Torvalds } 18331da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 18341da177e4SLinus Torvalds 18351da177e4SLinus Torvalds 18361da177e4SLinus Torvalds /********************************************************************* 18371da177e4SLinus Torvalds * GOVERNORS * 18381da177e4SLinus Torvalds *********************************************************************/ 18391da177e4SLinus Torvalds 18401c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 18411c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 18421c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 18431c03a2d0SViresh Kumar { 18441c03a2d0SViresh Kumar int ret; 18451c03a2d0SViresh Kumar 18461c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 18471c03a2d0SViresh Kumar 18481c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 18491c03a2d0SViresh Kumar if (!freqs->new) 18501c03a2d0SViresh Kumar return 0; 18511c03a2d0SViresh Kumar 18521c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 18531c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 18541c03a2d0SViresh Kumar 18551c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 18561c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 18571c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 18581c03a2d0SViresh Kumar 18591c03a2d0SViresh Kumar if (ret) 18601c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 18611c03a2d0SViresh Kumar __func__, ret); 18621c03a2d0SViresh Kumar 18631c03a2d0SViresh Kumar return ret; 18641c03a2d0SViresh Kumar } 18651c03a2d0SViresh Kumar 18668d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 18678d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 18688d65775dSViresh Kumar { 18691c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 18701c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 18718d65775dSViresh Kumar int retval = -EINVAL; 18728d65775dSViresh Kumar bool notify; 18738d65775dSViresh Kumar 18748d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 18758d65775dSViresh Kumar if (notify) { 18761c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 18771c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 18781c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 18791c03a2d0SViresh Kumar if (retval) 18801c03a2d0SViresh Kumar return retval; 18818d65775dSViresh Kumar 18821c03a2d0SViresh Kumar intermediate_freq = freqs.new; 18831c03a2d0SViresh Kumar /* Set old freq to intermediate */ 18841c03a2d0SViresh Kumar if (intermediate_freq) 18851c03a2d0SViresh Kumar freqs.old = freqs.new; 18861c03a2d0SViresh Kumar } 18871c03a2d0SViresh Kumar 18881c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 18898d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 18908d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 18918d65775dSViresh Kumar 18928d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 18938d65775dSViresh Kumar } 18948d65775dSViresh Kumar 18958d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 18968d65775dSViresh Kumar if (retval) 18978d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 18988d65775dSViresh Kumar retval); 18998d65775dSViresh Kumar 19001c03a2d0SViresh Kumar if (notify) { 19018d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 19028d65775dSViresh Kumar 19031c03a2d0SViresh Kumar /* 19041c03a2d0SViresh Kumar * Failed after setting to intermediate freq? Driver should have 19051c03a2d0SViresh Kumar * reverted back to initial frequency and so should we. Check 19061c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 190758405af6SShailendra Verma * case we haven't switched to intermediate freq at all. 19081c03a2d0SViresh Kumar */ 19091c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 19101c03a2d0SViresh Kumar freqs.old = intermediate_freq; 19111c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 19121c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19131c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 19141c03a2d0SViresh Kumar } 19151c03a2d0SViresh Kumar } 19161c03a2d0SViresh Kumar 19178d65775dSViresh Kumar return retval; 19188d65775dSViresh Kumar } 19198d65775dSViresh Kumar 19201da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 19211da177e4SLinus Torvalds unsigned int target_freq, 19221da177e4SLinus Torvalds unsigned int relation) 19231da177e4SLinus Torvalds { 19247249924eSViresh Kumar unsigned int old_target_freq = target_freq; 19258d65775dSViresh Kumar int retval = -EINVAL; 1926c32b6b8eSAshok Raj 1927a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 1928a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 1929a7b422cdSKonrad Rzeszutek Wilk 19307249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 19317249924eSViresh Kumar if (target_freq > policy->max) 19327249924eSViresh Kumar target_freq = policy->max; 19337249924eSViresh Kumar if (target_freq < policy->min) 19347249924eSViresh Kumar target_freq = policy->min; 19357249924eSViresh Kumar 19367249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 19377249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 19385a1c0228SViresh Kumar 19399c0ebcf7SViresh Kumar /* 19409c0ebcf7SViresh Kumar * This might look like a redundant call as we are checking it again 19419c0ebcf7SViresh Kumar * after finding index. But it is left intentionally for cases where 19429c0ebcf7SViresh Kumar * exactly same freq is called again and so we can save on few function 19439c0ebcf7SViresh Kumar * calls. 19449c0ebcf7SViresh Kumar */ 19455a1c0228SViresh Kumar if (target_freq == policy->cur) 19465a1c0228SViresh Kumar return 0; 19475a1c0228SViresh Kumar 19481c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 19491c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 19501c03a2d0SViresh Kumar 19511c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 19521c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 19539c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 19549c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 19559c0ebcf7SViresh Kumar int index; 195690d45d17SAshok Raj 19579c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 19589c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 19599c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 19609c0ebcf7SViresh Kumar goto out; 19619c0ebcf7SViresh Kumar } 19629c0ebcf7SViresh Kumar 19639c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 19649c0ebcf7SViresh Kumar target_freq, relation, &index); 19659c0ebcf7SViresh Kumar if (unlikely(retval)) { 19669c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 19679c0ebcf7SViresh Kumar goto out; 19689c0ebcf7SViresh Kumar } 19699c0ebcf7SViresh Kumar 1970d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 19719c0ebcf7SViresh Kumar retval = 0; 1972d4019f0aSViresh Kumar goto out; 1973d4019f0aSViresh Kumar } 1974d4019f0aSViresh Kumar 19758d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 19769c0ebcf7SViresh Kumar } 19779c0ebcf7SViresh Kumar 19789c0ebcf7SViresh Kumar out: 19791da177e4SLinus Torvalds return retval; 19801da177e4SLinus Torvalds } 19811da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 19821da177e4SLinus Torvalds 19831da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 19841da177e4SLinus Torvalds unsigned int target_freq, 19851da177e4SLinus Torvalds unsigned int relation) 19861da177e4SLinus Torvalds { 1987f1829e4aSJulia Lawall int ret = -EINVAL; 19881da177e4SLinus Torvalds 1989ad7722daSviresh kumar down_write(&policy->rwsem); 19901da177e4SLinus Torvalds 19911da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 19921da177e4SLinus Torvalds 1993ad7722daSviresh kumar up_write(&policy->rwsem); 19941da177e4SLinus Torvalds 19951da177e4SLinus Torvalds return ret; 19961da177e4SLinus Torvalds } 19971da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 19981da177e4SLinus Torvalds 1999e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 2000e08f5f5bSGautham R Shenoy unsigned int event) 20011da177e4SLinus Torvalds { 2002cc993cabSDave Jones int ret; 20036afde10cSThomas Renninger 20046afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 20056afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 20066afde10cSThomas Renninger That this is the case is already ensured in Kconfig 20076afde10cSThomas Renninger */ 20086afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 20096afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 20106afde10cSThomas Renninger #else 20116afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 20126afde10cSThomas Renninger #endif 20131c256245SThomas Renninger 20142f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 20152f0aea93SViresh Kumar if (cpufreq_suspended) 20162f0aea93SViresh Kumar return 0; 2017cb57720bSEthan Zhao /* 2018cb57720bSEthan Zhao * Governor might not be initiated here if ACPI _PPC changed 2019cb57720bSEthan Zhao * notification happened, so check it. 2020cb57720bSEthan Zhao */ 2021cb57720bSEthan Zhao if (!policy->governor) 2022cb57720bSEthan Zhao return -EINVAL; 20232f0aea93SViresh Kumar 20241c256245SThomas Renninger if (policy->governor->max_transition_latency && 20251c256245SThomas Renninger policy->cpuinfo.transition_latency > 20261c256245SThomas Renninger policy->governor->max_transition_latency) { 20276afde10cSThomas Renninger if (!gov) 20286afde10cSThomas Renninger return -EINVAL; 20296afde10cSThomas Renninger else { 2030e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 2031e837f9b5SJoe Perches policy->governor->name, gov->name); 20321c256245SThomas Renninger policy->governor = gov; 20331c256245SThomas Renninger } 20346afde10cSThomas Renninger } 20351da177e4SLinus Torvalds 2036fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20371da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 20381da177e4SLinus Torvalds return -EINVAL; 20391da177e4SLinus Torvalds 20402d06d8c4SDominik Brodowski pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2041e08f5f5bSGautham R Shenoy policy->cpu, event); 204295731ebbSXiaoguang Chen 204395731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 204456d07db2SSrivatsa S. Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2045f73d3933SViresh Kumar || (!policy->governor_enabled 2046f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 204795731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 204895731ebbSXiaoguang Chen return -EBUSY; 204995731ebbSXiaoguang Chen } 205095731ebbSXiaoguang Chen 205195731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 205295731ebbSXiaoguang Chen policy->governor_enabled = false; 205395731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 205495731ebbSXiaoguang Chen policy->governor_enabled = true; 205595731ebbSXiaoguang Chen 205695731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 205795731ebbSXiaoguang Chen 20581da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 20591da177e4SLinus Torvalds 20604d5dcc42SViresh Kumar if (!ret) { 20614d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20628e53695fSViresh Kumar policy->governor->initialized++; 20634d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 20648e53695fSViresh Kumar policy->governor->initialized--; 206595731ebbSXiaoguang Chen } else { 206695731ebbSXiaoguang Chen /* Restore original values */ 206795731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 206895731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 206995731ebbSXiaoguang Chen policy->governor_enabled = true; 207095731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 207195731ebbSXiaoguang Chen policy->governor_enabled = false; 207295731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 20734d5dcc42SViresh Kumar } 2074b394058fSViresh Kumar 2075fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2076fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 20771da177e4SLinus Torvalds module_put(policy->governor->owner); 20781da177e4SLinus Torvalds 20791da177e4SLinus Torvalds return ret; 20801da177e4SLinus Torvalds } 20811da177e4SLinus Torvalds 20821da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 20831da177e4SLinus Torvalds { 20843bcb09a3SJeremy Fitzhardinge int err; 20851da177e4SLinus Torvalds 20861da177e4SLinus Torvalds if (!governor) 20871da177e4SLinus Torvalds return -EINVAL; 20881da177e4SLinus Torvalds 2089a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2090a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2091a7b422cdSKonrad Rzeszutek Wilk 20923fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 20931da177e4SLinus Torvalds 2094b394058fSViresh Kumar governor->initialized = 0; 20953bcb09a3SJeremy Fitzhardinge err = -EBUSY; 209642f91fa1SViresh Kumar if (!find_governor(governor->name)) { 20973bcb09a3SJeremy Fitzhardinge err = 0; 20981da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 20993bcb09a3SJeremy Fitzhardinge } 21001da177e4SLinus Torvalds 21013fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21023bcb09a3SJeremy Fitzhardinge return err; 21031da177e4SLinus Torvalds } 21041da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 21051da177e4SLinus Torvalds 21061da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 21071da177e4SLinus Torvalds { 21084573237bSViresh Kumar struct cpufreq_policy *policy; 21094573237bSViresh Kumar unsigned long flags; 211090e41bacSPrarit Bhargava 21111da177e4SLinus Torvalds if (!governor) 21121da177e4SLinus Torvalds return; 21131da177e4SLinus Torvalds 2114a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2115a7b422cdSKonrad Rzeszutek Wilk return; 2116a7b422cdSKonrad Rzeszutek Wilk 21174573237bSViresh Kumar /* clear last_governor for all inactive policies */ 21184573237bSViresh Kumar read_lock_irqsave(&cpufreq_driver_lock, flags); 21194573237bSViresh Kumar for_each_inactive_policy(policy) { 212018bf3a12SViresh Kumar if (!strcmp(policy->last_governor, governor->name)) { 212118bf3a12SViresh Kumar policy->governor = NULL; 21224573237bSViresh Kumar strcpy(policy->last_governor, "\0"); 212390e41bacSPrarit Bhargava } 212418bf3a12SViresh Kumar } 21254573237bSViresh Kumar read_unlock_irqrestore(&cpufreq_driver_lock, flags); 212690e41bacSPrarit Bhargava 21273fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21281da177e4SLinus Torvalds list_del(&governor->governor_list); 21293fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21301da177e4SLinus Torvalds return; 21311da177e4SLinus Torvalds } 21321da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 21331da177e4SLinus Torvalds 21341da177e4SLinus Torvalds 21351da177e4SLinus Torvalds /********************************************************************* 21361da177e4SLinus Torvalds * POLICY INTERFACE * 21371da177e4SLinus Torvalds *********************************************************************/ 21381da177e4SLinus Torvalds 21391da177e4SLinus Torvalds /** 21401da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 214129464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 214229464f28SDave Jones * is written 21431da177e4SLinus Torvalds * 21441da177e4SLinus Torvalds * Reads the current cpufreq policy. 21451da177e4SLinus Torvalds */ 21461da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 21471da177e4SLinus Torvalds { 21481da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 21491da177e4SLinus Torvalds if (!policy) 21501da177e4SLinus Torvalds return -EINVAL; 21511da177e4SLinus Torvalds 21521da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 21531da177e4SLinus Torvalds if (!cpu_policy) 21541da177e4SLinus Torvalds return -EINVAL; 21551da177e4SLinus Torvalds 2156d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 21571da177e4SLinus Torvalds 21581da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 21591da177e4SLinus Torvalds return 0; 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 21621da177e4SLinus Torvalds 2163153d7f3fSArjan van de Ven /* 2164037ce839SViresh Kumar * policy : current policy. 2165037ce839SViresh Kumar * new_policy: policy to be set. 2166153d7f3fSArjan van de Ven */ 2167037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 21683a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 21691da177e4SLinus Torvalds { 2170d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2171d9a789c7SRafael J. Wysocki int ret; 21721da177e4SLinus Torvalds 2173e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2174e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 21751da177e4SLinus Torvalds 2176d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 21771da177e4SLinus Torvalds 2178fba9573bSPan Xinhui /* 2179fba9573bSPan Xinhui * This check works well when we store new min/max freq attributes, 2180fba9573bSPan Xinhui * because new_policy is a copy of policy with one field updated. 2181fba9573bSPan Xinhui */ 2182fba9573bSPan Xinhui if (new_policy->min > new_policy->max) 2183d9a789c7SRafael J. Wysocki return -EINVAL; 21849c9a43edSMattia Dongili 21851da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 21863a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 21871da177e4SLinus Torvalds if (ret) 2188d9a789c7SRafael J. Wysocki return ret; 21891da177e4SLinus Torvalds 21901da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2191e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 21923a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 21931da177e4SLinus Torvalds 2194bb176f7dSViresh Kumar /* 2195bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2196bb176f7dSViresh Kumar * different to the first one 2197bb176f7dSViresh Kumar */ 21983a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2199e041c683SAlan Stern if (ret) 2200d9a789c7SRafael J. Wysocki return ret; 22011da177e4SLinus Torvalds 22021da177e4SLinus Torvalds /* notification of the new policy */ 2203e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22043a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 22051da177e4SLinus Torvalds 22063a3e9e06SViresh Kumar policy->min = new_policy->min; 22073a3e9e06SViresh Kumar policy->max = new_policy->max; 22081da177e4SLinus Torvalds 22092d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 22103a3e9e06SViresh Kumar policy->min, policy->max); 22111da177e4SLinus Torvalds 22121c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 22133a3e9e06SViresh Kumar policy->policy = new_policy->policy; 22142d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2215d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2216d9a789c7SRafael J. Wysocki } 2217d9a789c7SRafael J. Wysocki 2218d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2219d9a789c7SRafael J. Wysocki goto out; 22201da177e4SLinus Torvalds 22212d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 22221da177e4SLinus Torvalds 2223d9a789c7SRafael J. Wysocki /* save old, working values */ 2224d9a789c7SRafael J. Wysocki old_gov = policy->governor; 22251da177e4SLinus Torvalds /* end old governor */ 2226d9a789c7SRafael J. Wysocki if (old_gov) { 22274bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 22284bc384aeSViresh Kumar if (ret) { 22294bc384aeSViresh Kumar /* This can happen due to race with other operations */ 22304bc384aeSViresh Kumar pr_debug("%s: Failed to Stop Governor: %s (%d)\n", 22314bc384aeSViresh Kumar __func__, old_gov->name, ret); 22324bc384aeSViresh Kumar return ret; 22334bc384aeSViresh Kumar } 22344bc384aeSViresh Kumar 2235ad7722daSviresh kumar up_write(&policy->rwsem); 22364bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2237ad7722daSviresh kumar down_write(&policy->rwsem); 22384bc384aeSViresh Kumar 22394bc384aeSViresh Kumar if (ret) { 22404bc384aeSViresh Kumar pr_err("%s: Failed to Exit Governor: %s (%d)\n", 22414bc384aeSViresh Kumar __func__, old_gov->name, ret); 22424bc384aeSViresh Kumar return ret; 22434bc384aeSViresh Kumar } 22447bd353a9SViresh Kumar } 22451da177e4SLinus Torvalds 22461da177e4SLinus Torvalds /* start new governor */ 22473a3e9e06SViresh Kumar policy->governor = new_policy->governor; 22484bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 22494bc384aeSViresh Kumar if (!ret) { 22504bc384aeSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 22514bc384aeSViresh Kumar if (!ret) 2252d9a789c7SRafael J. Wysocki goto out; 2253d9a789c7SRafael J. Wysocki 2254ad7722daSviresh kumar up_write(&policy->rwsem); 2255d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2256ad7722daSviresh kumar down_write(&policy->rwsem); 2257955ef483SViresh Kumar } 22587bd353a9SViresh Kumar 22591da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2260d9a789c7SRafael J. Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 22611da177e4SLinus Torvalds if (old_gov) { 22623a3e9e06SViresh Kumar policy->governor = old_gov; 22634bc384aeSViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) 22644bc384aeSViresh Kumar policy->governor = NULL; 22654bc384aeSViresh Kumar else 2266d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 22671da177e4SLinus Torvalds } 22681da177e4SLinus Torvalds 22694bc384aeSViresh Kumar return ret; 2270d9a789c7SRafael J. Wysocki 2271d9a789c7SRafael J. Wysocki out: 2272d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2273d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 22741da177e4SLinus Torvalds } 22751da177e4SLinus Torvalds 22761da177e4SLinus Torvalds /** 22771da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 22781da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 22791da177e4SLinus Torvalds * 228025985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 22811da177e4SLinus Torvalds * at different times. 22821da177e4SLinus Torvalds */ 22831da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 22841da177e4SLinus Torvalds { 22853a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 22863a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2287f1829e4aSJulia Lawall int ret; 22881da177e4SLinus Torvalds 2289fefa8ff8SAaron Plattner if (!policy) 2290fefa8ff8SAaron Plattner return -ENODEV; 22911da177e4SLinus Torvalds 2292ad7722daSviresh kumar down_write(&policy->rwsem); 22931da177e4SLinus Torvalds 22942d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2295d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 22963a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 22973a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 22981da177e4SLinus Torvalds 2299bb176f7dSViresh Kumar /* 2300bb176f7dSViresh Kumar * BIOS might change freq behind our back 2301bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2302bb176f7dSViresh Kumar */ 23032ed99e39SRafael J. Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 23043a3e9e06SViresh Kumar new_policy.cur = cpufreq_driver->get(cpu); 2305bd0fa9bbSViresh Kumar if (WARN_ON(!new_policy.cur)) { 2306bd0fa9bbSViresh Kumar ret = -EIO; 2307fefa8ff8SAaron Plattner goto unlock; 2308bd0fa9bbSViresh Kumar } 2309bd0fa9bbSViresh Kumar 23103a3e9e06SViresh Kumar if (!policy->cur) { 2311e837f9b5SJoe Perches pr_debug("Driver did not initialize current freq\n"); 23123a3e9e06SViresh Kumar policy->cur = new_policy.cur; 2313a85f7bd3SThomas Renninger } else { 23149c0ebcf7SViresh Kumar if (policy->cur != new_policy.cur && has_target()) 2315a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, new_policy.cur); 23160961dd0dSThomas Renninger } 2317a85f7bd3SThomas Renninger } 23180961dd0dSThomas Renninger 2319037ce839SViresh Kumar ret = cpufreq_set_policy(policy, &new_policy); 23201da177e4SLinus Torvalds 2321fefa8ff8SAaron Plattner unlock: 2322ad7722daSviresh kumar up_write(&policy->rwsem); 23235a01f2e8SVenkatesh Pallipadi 23243a3e9e06SViresh Kumar cpufreq_cpu_put(policy); 23251da177e4SLinus Torvalds return ret; 23261da177e4SLinus Torvalds } 23271da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_update_policy); 23281da177e4SLinus Torvalds 23292760984fSPaul Gortmaker static int cpufreq_cpu_callback(struct notifier_block *nfb, 2330c32b6b8eSAshok Raj unsigned long action, void *hcpu) 2331c32b6b8eSAshok Raj { 2332c32b6b8eSAshok Raj unsigned int cpu = (unsigned long)hcpu; 2333c32b6b8eSAshok Raj 23345302c3fbSSrivatsa S. Bhat switch (action & ~CPU_TASKS_FROZEN) { 2335c32b6b8eSAshok Raj case CPU_ONLINE: 23360b275352SRafael J. Wysocki cpufreq_online(cpu); 2337c32b6b8eSAshok Raj break; 23385302c3fbSSrivatsa S. Bhat 2339c32b6b8eSAshok Raj case CPU_DOWN_PREPARE: 234015c0b4d2SRafael J. Wysocki cpufreq_offline_prepare(cpu); 23411aee40acSSrivatsa S. Bhat break; 23421aee40acSSrivatsa S. Bhat 23431aee40acSSrivatsa S. Bhat case CPU_POST_DEAD: 234415c0b4d2SRafael J. Wysocki cpufreq_offline_finish(cpu); 2345c32b6b8eSAshok Raj break; 23465302c3fbSSrivatsa S. Bhat 23475a01f2e8SVenkatesh Pallipadi case CPU_DOWN_FAILED: 23480b275352SRafael J. Wysocki cpufreq_online(cpu); 2349c32b6b8eSAshok Raj break; 2350c32b6b8eSAshok Raj } 2351c32b6b8eSAshok Raj return NOTIFY_OK; 2352c32b6b8eSAshok Raj } 2353c32b6b8eSAshok Raj 23549c36f746SNeal Buckendahl static struct notifier_block __refdata cpufreq_cpu_notifier = { 2355c32b6b8eSAshok Raj .notifier_call = cpufreq_cpu_callback, 2356c32b6b8eSAshok Raj }; 23571da177e4SLinus Torvalds 23581da177e4SLinus Torvalds /********************************************************************* 23596f19efc0SLukasz Majewski * BOOST * 23606f19efc0SLukasz Majewski *********************************************************************/ 23616f19efc0SLukasz Majewski static int cpufreq_boost_set_sw(int state) 23626f19efc0SLukasz Majewski { 23636f19efc0SLukasz Majewski struct cpufreq_frequency_table *freq_table; 23646f19efc0SLukasz Majewski struct cpufreq_policy *policy; 23656f19efc0SLukasz Majewski int ret = -EINVAL; 23666f19efc0SLukasz Majewski 2367f963735aSViresh Kumar for_each_active_policy(policy) { 23686f19efc0SLukasz Majewski freq_table = cpufreq_frequency_get_table(policy->cpu); 23696f19efc0SLukasz Majewski if (freq_table) { 23706f19efc0SLukasz Majewski ret = cpufreq_frequency_table_cpuinfo(policy, 23716f19efc0SLukasz Majewski freq_table); 23726f19efc0SLukasz Majewski if (ret) { 23736f19efc0SLukasz Majewski pr_err("%s: Policy frequency update failed\n", 23746f19efc0SLukasz Majewski __func__); 23756f19efc0SLukasz Majewski break; 23766f19efc0SLukasz Majewski } 23776f19efc0SLukasz Majewski policy->user_policy.max = policy->max; 23786f19efc0SLukasz Majewski __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 23796f19efc0SLukasz Majewski } 23806f19efc0SLukasz Majewski } 23816f19efc0SLukasz Majewski 23826f19efc0SLukasz Majewski return ret; 23836f19efc0SLukasz Majewski } 23846f19efc0SLukasz Majewski 23856f19efc0SLukasz Majewski int cpufreq_boost_trigger_state(int state) 23866f19efc0SLukasz Majewski { 23876f19efc0SLukasz Majewski unsigned long flags; 23886f19efc0SLukasz Majewski int ret = 0; 23896f19efc0SLukasz Majewski 23906f19efc0SLukasz Majewski if (cpufreq_driver->boost_enabled == state) 23916f19efc0SLukasz Majewski return 0; 23926f19efc0SLukasz Majewski 23936f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 23946f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = state; 23956f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 23966f19efc0SLukasz Majewski 23976f19efc0SLukasz Majewski ret = cpufreq_driver->set_boost(state); 23986f19efc0SLukasz Majewski if (ret) { 23996f19efc0SLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 24006f19efc0SLukasz Majewski cpufreq_driver->boost_enabled = !state; 24016f19efc0SLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24026f19efc0SLukasz Majewski 2403e837f9b5SJoe Perches pr_err("%s: Cannot %s BOOST\n", 2404e837f9b5SJoe Perches __func__, state ? "enable" : "disable"); 24056f19efc0SLukasz Majewski } 24066f19efc0SLukasz Majewski 24076f19efc0SLukasz Majewski return ret; 24086f19efc0SLukasz Majewski } 24096f19efc0SLukasz Majewski 24106f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 24116f19efc0SLukasz Majewski { 24126f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 24136f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 24146f19efc0SLukasz Majewski 24156f19efc0SLukasz Majewski return 0; 24166f19efc0SLukasz Majewski } 24176f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 24186f19efc0SLukasz Majewski 24196f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 24206f19efc0SLukasz Majewski { 24216f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 24226f19efc0SLukasz Majewski } 24236f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 24246f19efc0SLukasz Majewski 24256f19efc0SLukasz Majewski /********************************************************************* 24261da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 24271da177e4SLinus Torvalds *********************************************************************/ 24281da177e4SLinus Torvalds 24291da177e4SLinus Torvalds /** 24301da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 24311da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 24321da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 24331da177e4SLinus Torvalds * 24341da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 24351da177e4SLinus Torvalds * returns zero on success, -EBUSY when another driver got here first 24361da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 24371da177e4SLinus Torvalds * 24381da177e4SLinus Torvalds */ 2439221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 24401da177e4SLinus Torvalds { 24411da177e4SLinus Torvalds unsigned long flags; 24421da177e4SLinus Torvalds int ret; 24431da177e4SLinus Torvalds 2444a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2445a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2446a7b422cdSKonrad Rzeszutek Wilk 24471da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 24489c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 24499832235fSRafael J. Wysocki driver_data->target) || 24509832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 24511c03a2d0SViresh Kumar driver_data->target)) || 24521c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 24531da177e4SLinus Torvalds return -EINVAL; 24541da177e4SLinus Torvalds 24552d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 24561da177e4SLinus Torvalds 2457fdd320daSRafael J. Wysocki /* Protect against concurrent CPU online/offline. */ 2458fdd320daSRafael J. Wysocki get_online_cpus(); 2459fdd320daSRafael J. Wysocki 24600d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 24611c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 24620d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2463fdd320daSRafael J. Wysocki ret = -EEXIST; 2464fdd320daSRafael J. Wysocki goto out; 24651da177e4SLinus Torvalds } 24661c3d85ddSRafael J. Wysocki cpufreq_driver = driver_data; 24670d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24681da177e4SLinus Torvalds 2469bc68b7dfSViresh Kumar if (driver_data->setpolicy) 2470bc68b7dfSViresh Kumar driver_data->flags |= CPUFREQ_CONST_LOOPS; 2471bc68b7dfSViresh Kumar 24726f19efc0SLukasz Majewski if (cpufreq_boost_supported()) { 24736f19efc0SLukasz Majewski /* 24746f19efc0SLukasz Majewski * Check if driver provides function to enable boost - 24756f19efc0SLukasz Majewski * if not, use cpufreq_boost_set_sw as default 24766f19efc0SLukasz Majewski */ 24776f19efc0SLukasz Majewski if (!cpufreq_driver->set_boost) 24786f19efc0SLukasz Majewski cpufreq_driver->set_boost = cpufreq_boost_set_sw; 24796f19efc0SLukasz Majewski 24806f19efc0SLukasz Majewski ret = cpufreq_sysfs_create_file(&boost.attr); 24816f19efc0SLukasz Majewski if (ret) { 24826f19efc0SLukasz Majewski pr_err("%s: cannot register global BOOST sysfs file\n", 24836f19efc0SLukasz Majewski __func__); 24846f19efc0SLukasz Majewski goto err_null_driver; 24856f19efc0SLukasz Majewski } 24866f19efc0SLukasz Majewski } 24876f19efc0SLukasz Majewski 24888a25a2fdSKay Sievers ret = subsys_interface_register(&cpufreq_interface); 24898f5bc2abSJiri Slaby if (ret) 24906f19efc0SLukasz Majewski goto err_boost_unreg; 24911da177e4SLinus Torvalds 2492ce1bcfe9SViresh Kumar if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2493ce1bcfe9SViresh Kumar list_empty(&cpufreq_policy_list)) { 24941da177e4SLinus Torvalds /* if all ->init() calls failed, unregister */ 2495ce1bcfe9SViresh Kumar pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2496e08f5f5bSGautham R Shenoy driver_data->name); 24978a25a2fdSKay Sievers goto err_if_unreg; 24981da177e4SLinus Torvalds } 24991da177e4SLinus Torvalds 250065edc68cSChandra Seetharaman register_hotcpu_notifier(&cpufreq_cpu_notifier); 25012d06d8c4SDominik Brodowski pr_debug("driver %s up and running\n", driver_data->name); 25021da177e4SLinus Torvalds 2503fdd320daSRafael J. Wysocki out: 2504fdd320daSRafael J. Wysocki put_online_cpus(); 2505fdd320daSRafael J. Wysocki return ret; 2506fdd320daSRafael J. Wysocki 25078a25a2fdSKay Sievers err_if_unreg: 25088a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25096f19efc0SLukasz Majewski err_boost_unreg: 25106f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25116f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25128f5bc2abSJiri Slaby err_null_driver: 25130d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25141c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25150d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2516fdd320daSRafael J. Wysocki goto out; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_driver); 25191da177e4SLinus Torvalds 25201da177e4SLinus Torvalds /** 25211da177e4SLinus Torvalds * cpufreq_unregister_driver - unregister the current CPUFreq driver 25221da177e4SLinus Torvalds * 25231da177e4SLinus Torvalds * Unregister the current CPUFreq driver. Only call this if you have 25241da177e4SLinus Torvalds * the right to do so, i.e. if you have succeeded in initialising before! 25251da177e4SLinus Torvalds * Returns zero if successful, and -EINVAL if the cpufreq_driver is 25261da177e4SLinus Torvalds * currently not initialised. 25271da177e4SLinus Torvalds */ 2528221dee28SLinus Torvalds int cpufreq_unregister_driver(struct cpufreq_driver *driver) 25291da177e4SLinus Torvalds { 25301da177e4SLinus Torvalds unsigned long flags; 25311da177e4SLinus Torvalds 25321c3d85ddSRafael J. Wysocki if (!cpufreq_driver || (driver != cpufreq_driver)) 25331da177e4SLinus Torvalds return -EINVAL; 25341da177e4SLinus Torvalds 25352d06d8c4SDominik Brodowski pr_debug("unregistering driver %s\n", driver->name); 25361da177e4SLinus Torvalds 2537454d3a25SSebastian Andrzej Siewior /* Protect against concurrent cpu hotplug */ 2538454d3a25SSebastian Andrzej Siewior get_online_cpus(); 25398a25a2fdSKay Sievers subsys_interface_unregister(&cpufreq_interface); 25406f19efc0SLukasz Majewski if (cpufreq_boost_supported()) 25416f19efc0SLukasz Majewski cpufreq_sysfs_remove_file(&boost.attr); 25426f19efc0SLukasz Majewski 254365edc68cSChandra Seetharaman unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 25441da177e4SLinus Torvalds 25450d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 25466eed9404SViresh Kumar 25471c3d85ddSRafael J. Wysocki cpufreq_driver = NULL; 25486eed9404SViresh Kumar 25490d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2550454d3a25SSebastian Andrzej Siewior put_online_cpus(); 25511da177e4SLinus Torvalds 25521da177e4SLinus Torvalds return 0; 25531da177e4SLinus Torvalds } 25541da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 25555a01f2e8SVenkatesh Pallipadi 255690de2a4aSDoug Anderson /* 255790de2a4aSDoug Anderson * Stop cpufreq at shutdown to make sure it isn't holding any locks 255890de2a4aSDoug Anderson * or mutexes when secondary CPUs are halted. 255990de2a4aSDoug Anderson */ 256090de2a4aSDoug Anderson static struct syscore_ops cpufreq_syscore_ops = { 256190de2a4aSDoug Anderson .shutdown = cpufreq_suspend, 256290de2a4aSDoug Anderson }; 256390de2a4aSDoug Anderson 25645a01f2e8SVenkatesh Pallipadi static int __init cpufreq_core_init(void) 25655a01f2e8SVenkatesh Pallipadi { 2566a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2567a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2568a7b422cdSKonrad Rzeszutek Wilk 25692361be23SViresh Kumar cpufreq_global_kobject = kobject_create(); 25708aa84ad8SThomas Renninger BUG_ON(!cpufreq_global_kobject); 25718aa84ad8SThomas Renninger 257290de2a4aSDoug Anderson register_syscore_ops(&cpufreq_syscore_ops); 257390de2a4aSDoug Anderson 25745a01f2e8SVenkatesh Pallipadi return 0; 25755a01f2e8SVenkatesh Pallipadi } 25765a01f2e8SVenkatesh Pallipadi core_initcall(cpufreq_core_init); 2577