/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *           (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *            Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *            Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/* Macros to iterate over lists */
/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
#define for_each_policy(__policy)                               \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - set policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the same clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
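/*
 * Example (illustrative only, not part of this file): a minimal driver
 * ->init() callback built on cpufreq_generic_init().  The names
 * "foo_driver_init" and "foo_freq_table" are hypothetical; a real driver
 * supplies its own CPUFREQ_TABLE_END-terminated table and a latency measured
 * for its hardware (300 us below is just an assumed value).  Note that
 * transition_latency is given in nanoseconds:
 *
 *	static int foo_driver_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
 *	}
 */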
unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        return per_cpu(cpufreq_cpu_data, cpu);
}

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy,
 * so a corresponding call to cpufreq_cpu_put() is required to decrement it.
 * If that call isn't made, the policy is never freed, since freeing depends
 * on the kobject count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        if (!down_read_trylock(&cpufreq_rwsem))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = per_cpu(cpufreq_cpu_data, cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy)
                up_read(&cpufreq_rwsem);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
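/*
 * Example (illustrative only): the get/put pairing expected from callers
 * outside the cpufreq core.  Every successful cpufreq_cpu_get() must be
 * balanced by cpufreq_cpu_put(), or the policy kobject (and the read lock
 * on cpufreq_rwsem) is leaked:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u currently at %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */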
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}
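/*
 * Worked example (illustrative only): cpufreq_scale(old, div, mult) computes
 * old * mult / div, so with a reference loops_per_jiffy of 4980736 captured
 * at 2000000 kHz, dropping the clock to 1000000 kHz rescales it to
 * 4980736 * 1000000 / 2000000 = 2490368.  The numbers are made up; only the
 * proportional relationship matters.
 */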
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
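/*
 * Example (illustrative only): how external code typically consumes these
 * notifications.  cpufreq_register_notifier() is part of the public cpufreq
 * API; the callback and notifier_block names below are hypothetical:
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_cpufreq_notify };
 *	...
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */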
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{

        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
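/*
 * Example (illustrative only): drivers that implement their own ->target()
 * callback are expected to wrap the actual hardware change in the begin/end
 * pair above.  "foo_set_rate" and the other foo_* names are hypothetical:
 *
 *	static int foo_target(struct cpufreq_policy *policy,
 *			      unsigned int target_freq, unsigned int relation)
 *	{
 *		struct cpufreq_freqs freqs;
 *		int ret;
 *
 *		freqs.old = policy->cur;
 *		freqs.new = target_freq;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_set_rate(target_freq);	// program the hardware
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */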

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
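/*
 * Usage note (illustrative only): when the driver reports boost support,
 * the attribute defined above is exposed as a global sysfs knob,
 * conventionally at /sys/devices/system/cpu/cpufreq/boost, and accepts
 * "0" or "1" exactly as parsed by store_boost() above.
 */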
static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}

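/*
 * Note (added for clarity): for drivers with a ->target()/->target_index()
 * interface, an unknown governor name is first looked up in
 * cpufreq_governor_list and, failing that, cpufreq_parse_governor() above
 * tries to autoload a module named "cpufreq_<governor>" (for example,
 * writing "ondemand" can trigger request_module("cpufreq_ondemand"))
 * before looking the name up again.
 */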
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
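/*
 * For clarity: show_one(scaling_max_freq, max) above expands to
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * i.e. one read-only accessor per sysfs attribute, all sharing the same shape.
 */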

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

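/*
 * Usage note (illustrative only): per-policy attributes defined in this file
 * are exposed under /sys/devices/system/cpu/cpuN/cpufreq/, so the governor
 * for CPU0 can be inspected and changed with, e.g.:
 *
 *	cat  /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors
 *	echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * which ends up in show_scaling_available_governors() and
 * store_scaling_governor() above.
 */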
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);

        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct device *cpu_dev;

                if (j == policy->cpu)
                        continue;

                pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret)
                        break;
        }
        return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
        else
                gov = CPUFREQ_DEFAULT_GOVERNOR;

        new_policy.governor = gov;

        /* Use the default policy if it's valid. */
        if (cpufreq_driver->setpolicy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev)
{
        int ret = 0;
        unsigned long flags;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        up_write(&policy->rwsem);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (policy)
                policy->governor = NULL;

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_REMOVE_POLICY, policy);

        down_read(&policy->rwsem);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_read(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}
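/*
 * Note (added for clarity): the helpers above form the policy lifecycle used
 * by the code below - cpufreq_policy_alloc() creates the object,
 * cpufreq_policy_put_kobj() drops the kobject and blocks until
 * cpufreq_sysfs_release() signals kobj_unregister, and only then is it safe
 * to call cpufreq_policy_free().
 */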

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
                             struct device *cpu_dev)
{
        int ret;

        if (WARN_ON(cpu == policy->cpu))
                return 0;

        /* Move kobject to the new policy->cpu */
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
                return ret;
        }

        down_write(&policy->rwsem);
        policy->cpu = cpu;
        up_write(&policy->rwsem);

        return 0;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
        bool recover_policy = cpufreq_suspended;

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get_raw(cpu);
        if (unlikely(policy))
                return 0;

        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;

        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_policy(policy) {
                if (cpumask_test_cpu(cpu, policy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        ret = cpufreq_add_policy_cpu(policy, cpu, dev);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

Wysocki * to the full init if that fails. 117472368d12SRafael J. Wysocki */ 117596bbbe4aSViresh Kumar policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; 117672368d12SRafael J. Wysocki if (!policy) { 117796bbbe4aSViresh Kumar recover_policy = false; 1178e9698cc5SSrivatsa S. Bhat policy = cpufreq_policy_alloc(); 1179059019a3SDave Jones if (!policy) 11801da177e4SLinus Torvalds goto nomem_out; 118172368d12SRafael J. Wysocki } 11820d66b91eSSrivatsa S. Bhat 11830d66b91eSSrivatsa S. Bhat /* 11840d66b91eSSrivatsa S. Bhat * In the resume path, since we restore a saved policy, the assignment 11850d66b91eSSrivatsa S. Bhat * to policy->cpu is like an update of the existing policy, rather than 11860d66b91eSSrivatsa S. Bhat * the creation of a brand new one. So we need to perform this update 11870d66b91eSSrivatsa S. Bhat * by invoking update_policy_cpu(). 11880d66b91eSSrivatsa S. Bhat */ 11891bfb425bSViresh Kumar if (recover_policy && cpu != policy->cpu) 11901bfb425bSViresh Kumar WARN_ON(update_policy_cpu(policy, cpu, dev)); 11911bfb425bSViresh Kumar else 11921da177e4SLinus Torvalds policy->cpu = cpu; 11930d66b91eSSrivatsa S. Bhat 1194835481d9SRusty Russell cpumask_copy(policy->cpus, cpumask_of(cpu)); 11951da177e4SLinus Torvalds 11961da177e4SLinus Torvalds /* call the driver. From then on the cpufreq core must be able 11971da177e4SLinus Torvalds * to accept all calls to ->verify and ->setpolicy for this CPU 11981da177e4SLinus Torvalds */ 11991c3d85ddSRafael J. Wysocki ret = cpufreq_driver->init(policy); 12001da177e4SLinus Torvalds if (ret) { 12012d06d8c4SDominik Brodowski pr_debug("initialization failed\n"); 12022eaa3e2dSViresh Kumar goto err_set_policy_cpu; 12031da177e4SLinus Torvalds } 1204643ae6e8SViresh Kumar 12056d4e81edSTomeu Vizoso down_write(&policy->rwsem); 12066d4e81edSTomeu Vizoso 12075a7e56a5SViresh Kumar /* related_cpus should at least contain policy->cpus */ 12085a7e56a5SViresh Kumar cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 12095a7e56a5SViresh Kumar 12105a7e56a5SViresh Kumar /* 12115a7e56a5SViresh Kumar * affected cpus must always be the ones which are online. We aren't 12125a7e56a5SViresh Kumar * managing offline cpus here. 12135a7e56a5SViresh Kumar */ 12145a7e56a5SViresh Kumar cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 12155a7e56a5SViresh Kumar 121696bbbe4aSViresh Kumar if (!recover_policy) { 12175a7e56a5SViresh Kumar policy->user_policy.min = policy->min; 12185a7e56a5SViresh Kumar policy->user_policy.max = policy->max; 12196d4e81edSTomeu Vizoso 12206d4e81edSTomeu Vizoso /* prepare interface data */ 12216d4e81edSTomeu Vizoso ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, 12226d4e81edSTomeu Vizoso &dev->kobj, "cpufreq"); 12236d4e81edSTomeu Vizoso if (ret) { 12246d4e81edSTomeu Vizoso pr_err("%s: failed to init policy->kobj: %d\n", 12256d4e81edSTomeu Vizoso __func__, ret); 12266d4e81edSTomeu Vizoso goto err_init_policy_kobj; 12276d4e81edSTomeu Vizoso } 12285a7e56a5SViresh Kumar } 12295a7e56a5SViresh Kumar 1230652ed95dSViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1231652ed95dSViresh Kumar for_each_cpu(j, policy->cpus) 1232652ed95dSViresh Kumar per_cpu(cpufreq_cpu_data, j) = policy; 1233652ed95dSViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1234652ed95dSViresh Kumar 12352ed99e39SRafael J.
Wysocki if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1236da60ce9fSViresh Kumar policy->cur = cpufreq_driver->get(policy->cpu); 1237da60ce9fSViresh Kumar if (!policy->cur) { 1238da60ce9fSViresh Kumar pr_err("%s: ->get() failed\n", __func__); 1239da60ce9fSViresh Kumar goto err_get_freq; 1240da60ce9fSViresh Kumar } 1241da60ce9fSViresh Kumar } 1242da60ce9fSViresh Kumar 1243d3916691SViresh Kumar /* 1244d3916691SViresh Kumar * Sometimes boot loaders set the CPU frequency to a value outside of 1245d3916691SViresh Kumar * the frequency table known to the cpufreq core. In such cases the CPU might be 1246d3916691SViresh Kumar * unstable if it has to run at that frequency for a long time, 1247d3916691SViresh Kumar * so it is better to set it to a frequency which is listed in the 1248d3916691SViresh Kumar * freq-table. This also keeps cpufreq stats consistent, as 1249d3916691SViresh Kumar * cpufreq-stats would otherwise fail to register because the current CPU frequency 1250d3916691SViresh Kumar * isn't found in the freq-table. 1251d3916691SViresh Kumar * 1252d3916691SViresh Kumar * Because we don't want this change to affect the boot process badly, we go 1253d3916691SViresh Kumar * for the next freq which is >= policy->cur ('cur' must be set by now, 1254d3916691SViresh Kumar * otherwise we will end up setting the freq to the lowest entry of the table, as 'cur' 1255d3916691SViresh Kumar * is initialized to zero). 1256d3916691SViresh Kumar * 1257d3916691SViresh Kumar * We pass the target freq as "policy->cur - 1", because otherwise 1258d3916691SViresh Kumar * __cpufreq_driver_target() would simply fail, as policy->cur would be 1259d3916691SViresh Kumar * equal to the target freq. 1260d3916691SViresh Kumar */ 1261d3916691SViresh Kumar if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) 1262d3916691SViresh Kumar && has_target()) { 1263d3916691SViresh Kumar /* Are we running at an unknown frequency? */ 1264d3916691SViresh Kumar ret = cpufreq_frequency_table_get_index(policy, policy->cur); 1265d3916691SViresh Kumar if (ret == -EINVAL) { 1266d3916691SViresh Kumar /* Warn user and fix it */ 1267d3916691SViresh Kumar pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n", 1268d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1269d3916691SViresh Kumar ret = __cpufreq_driver_target(policy, policy->cur - 1, 1270d3916691SViresh Kumar CPUFREQ_RELATION_L); 1271d3916691SViresh Kumar 1272d3916691SViresh Kumar /* 1273d3916691SViresh Kumar * Reaching here a few seconds after boot does not 1274d3916691SViresh Kumar * mean the system will remain stable at the "unknown" 1275d3916691SViresh Kumar * frequency for much longer. Hence, a BUG_ON(). 1276d3916691SViresh Kumar */ 1277d3916691SViresh Kumar BUG_ON(ret); 1278d3916691SViresh Kumar pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n", 1279d3916691SViresh Kumar __func__, policy->cpu, policy->cur); 1280d3916691SViresh Kumar } 1281d3916691SViresh Kumar } 1282d3916691SViresh Kumar 1283a1531acdSThomas Renninger blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1284a1531acdSThomas Renninger CPUFREQ_START, policy); 1285a1531acdSThomas Renninger 128696bbbe4aSViresh Kumar if (!recover_policy) { 1287308b60e7SViresh Kumar ret = cpufreq_add_dev_interface(policy, dev); 128819d6f7ecSDave Jones if (ret) 12890142f9dcSAhmed S.
Darwish goto err_out_unregister; 1290fcd7af91SViresh Kumar blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1291fcd7af91SViresh Kumar CPUFREQ_CREATE_POLICY, policy); 12929515f4d6SViresh Kumar } 1293c88a1f8bSLukasz Majewski 1294c88a1f8bSLukasz Majewski write_lock_irqsave(&cpufreq_driver_lock, flags); 1295c88a1f8bSLukasz Majewski list_add(&policy->policy_list, &cpufreq_policy_list); 1296c88a1f8bSLukasz Majewski write_unlock_irqrestore(&cpufreq_driver_lock, flags); 12978ff69732SDave Jones 1298e18f1682SSrivatsa S. Bhat cpufreq_init_policy(policy); 1299e18f1682SSrivatsa S. Bhat 130096bbbe4aSViresh Kumar if (!recover_policy) { 130108fd8c1cSViresh Kumar policy->user_policy.policy = policy->policy; 130208fd8c1cSViresh Kumar policy->user_policy.governor = policy->governor; 130308fd8c1cSViresh Kumar } 13044e97b631SViresh Kumar up_write(&policy->rwsem); 130508fd8c1cSViresh Kumar 1306038c5b3eSGreg Kroah-Hartman kobject_uevent(&policy->kobj, KOBJ_ADD); 13077c45cf31SViresh Kumar 13086eed9404SViresh Kumar up_read(&cpufreq_rwsem); 13096eed9404SViresh Kumar 13107c45cf31SViresh Kumar /* Callback for handling stuff after policy is ready */ 13117c45cf31SViresh Kumar if (cpufreq_driver->ready) 13127c45cf31SViresh Kumar cpufreq_driver->ready(policy); 13137c45cf31SViresh Kumar 13142d06d8c4SDominik Brodowski pr_debug("initialization complete\n"); 13151da177e4SLinus Torvalds 13161da177e4SLinus Torvalds return 0; 13171da177e4SLinus Torvalds 13181da177e4SLinus Torvalds err_out_unregister: 1319652ed95dSViresh Kumar err_get_freq: 13200d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 1321474deff7SViresh Kumar for_each_cpu(j, policy->cpus) 13227a6aedfaSMike Travis per_cpu(cpufreq_cpu_data, j) = NULL; 13230d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 13241da177e4SLinus Torvalds 13256d4e81edSTomeu Vizoso if (!recover_policy) { 13266d4e81edSTomeu Vizoso kobject_put(&policy->kobj); 13276d4e81edSTomeu Vizoso wait_for_completion(&policy->kobj_unregister); 13286d4e81edSTomeu Vizoso } 13296d4e81edSTomeu Vizoso err_init_policy_kobj: 13307106e02bSPrarit Bhargava up_write(&policy->rwsem); 13317106e02bSPrarit Bhargava 1332da60ce9fSViresh Kumar if (cpufreq_driver->exit) 1333da60ce9fSViresh Kumar cpufreq_driver->exit(policy); 13342eaa3e2dSViresh Kumar err_set_policy_cpu: 133596bbbe4aSViresh Kumar if (recover_policy) { 133672368d12SRafael J. Wysocki /* Do not leave stale fallback data behind. */ 133772368d12SRafael J. Wysocki per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; 133842f921a6SViresh Kumar cpufreq_policy_put_kobj(policy); 133972368d12SRafael J. Wysocki } 1340e9698cc5SSrivatsa S. Bhat cpufreq_policy_free(policy); 134142f921a6SViresh Kumar 13421da177e4SLinus Torvalds nomem_out: 13436eed9404SViresh Kumar up_read(&cpufreq_rwsem); 13446eed9404SViresh Kumar 13451da177e4SLinus Torvalds return ret; 13461da177e4SLinus Torvalds } 13471da177e4SLinus Torvalds 1348cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_prepare(struct device *dev, 134996bbbe4aSViresh Kumar struct subsys_interface *sif) 13501da177e4SLinus Torvalds { 1351f9ba680dSSrivatsa S. 
Bhat unsigned int cpu = dev->id, cpus; 13521bfb425bSViresh Kumar int ret; 13531da177e4SLinus Torvalds unsigned long flags; 13543a3e9e06SViresh Kumar struct cpufreq_policy *policy; 13551da177e4SLinus Torvalds 1356b8eed8afSViresh Kumar pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 13571da177e4SLinus Torvalds 13580d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 13591da177e4SLinus Torvalds 13603a3e9e06SViresh Kumar policy = per_cpu(cpufreq_cpu_data, cpu); 13611da177e4SLinus Torvalds 13628414809cSSrivatsa S. Bhat /* Save the policy somewhere when doing a light-weight tear-down */ 136396bbbe4aSViresh Kumar if (cpufreq_suspended) 13643a3e9e06SViresh Kumar per_cpu(cpufreq_cpu_data_fallback, cpu) = policy; 13658414809cSSrivatsa S. Bhat 13660d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 13671da177e4SLinus Torvalds 13683a3e9e06SViresh Kumar if (!policy) { 1369b8eed8afSViresh Kumar pr_debug("%s: No cpu_data found\n", __func__); 13701da177e4SLinus Torvalds return -EINVAL; 13711da177e4SLinus Torvalds } 13721da177e4SLinus Torvalds 13739c0ebcf7SViresh Kumar if (has_target()) { 13743de9bdebSViresh Kumar ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 13753de9bdebSViresh Kumar if (ret) { 13763de9bdebSViresh Kumar pr_err("%s: Failed to stop governor\n", __func__); 13773de9bdebSViresh Kumar return ret; 13783de9bdebSViresh Kumar } 13795a01f2e8SVenkatesh Pallipadi 1380fa69e33fSDirk Brandewie strncpy(per_cpu(cpufreq_cpu_governor, cpu), 13813a3e9e06SViresh Kumar policy->governor->name, CPUFREQ_NAME_LEN); 1382db5f2995SViresh Kumar } 13831da177e4SLinus Torvalds 1384ad7722daSviresh kumar down_read(&policy->rwsem); 13853a3e9e06SViresh Kumar cpus = cpumask_weight(policy->cpus); 1386ad7722daSviresh kumar up_read(&policy->rwsem); 13871da177e4SLinus Torvalds 138861173f25SSrivatsa S. Bhat if (cpu != policy->cpu) { 138973bf0fc2SViresh Kumar sysfs_remove_link(&dev->kobj, "cpufreq"); 139073bf0fc2SViresh Kumar } else if (cpus > 1) { 13911bfb425bSViresh Kumar /* Nominate new CPU */ 13921bfb425bSViresh Kumar int new_cpu = cpumask_any_but(policy->cpus, cpu); 13931bfb425bSViresh Kumar struct device *cpu_dev = get_cpu_device(new_cpu); 13941bfb425bSViresh Kumar 13951bfb425bSViresh Kumar sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 13961bfb425bSViresh Kumar ret = update_policy_cpu(policy, new_cpu, cpu_dev); 13971bfb425bSViresh Kumar if (ret) { 13981bfb425bSViresh Kumar if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj, 13991bfb425bSViresh Kumar "cpufreq")) 14001bfb425bSViresh Kumar pr_err("%s: Failed to restore kobj link to cpu:%d\n", 14011bfb425bSViresh Kumar __func__, cpu_dev->id); 14021bfb425bSViresh Kumar return ret; 14031bfb425bSViresh Kumar } 1404a82fab29SSrivatsa S. Bhat 1405bda9f552SStratos Karafotis if (!cpufreq_suspended) 140675949c9aSViresh Kumar pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", 140775949c9aSViresh Kumar __func__, new_cpu, cpu); 1408789ca243SPreeti U Murthy } else if (cpufreq_driver->stop_cpu) { 1409367dc4aaSDirk Brandewie cpufreq_driver->stop_cpu(policy); 14101da177e4SLinus Torvalds } 1411b8eed8afSViresh Kumar 1412cedb70afSSrivatsa S. Bhat return 0; 1413cedb70afSSrivatsa S. Bhat } 1414cedb70afSSrivatsa S. Bhat 1415cedb70afSSrivatsa S. Bhat static int __cpufreq_remove_dev_finish(struct device *dev, 141696bbbe4aSViresh Kumar struct subsys_interface *sif) 1417cedb70afSSrivatsa S. Bhat { 1418cedb70afSSrivatsa S. Bhat unsigned int cpu = dev->id, cpus; 1419cedb70afSSrivatsa S. Bhat int ret; 1420cedb70afSSrivatsa S. 
Bhat unsigned long flags; 1421cedb70afSSrivatsa S. Bhat struct cpufreq_policy *policy; 1422cedb70afSSrivatsa S. Bhat 14236ffae8c0SViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 1424cedb70afSSrivatsa S. Bhat policy = per_cpu(cpufreq_cpu_data, cpu); 14256ffae8c0SViresh Kumar per_cpu(cpufreq_cpu_data, cpu) = NULL; 14266ffae8c0SViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1427cedb70afSSrivatsa S. Bhat 1428cedb70afSSrivatsa S. Bhat if (!policy) { 1429cedb70afSSrivatsa S. Bhat pr_debug("%s: No cpu_data found\n", __func__); 1430cedb70afSSrivatsa S. Bhat return -EINVAL; 1431cedb70afSSrivatsa S. Bhat } 1432cedb70afSSrivatsa S. Bhat 1433ad7722daSviresh kumar down_write(&policy->rwsem); 1434cedb70afSSrivatsa S. Bhat cpus = cpumask_weight(policy->cpus); 14359c8f1ee4SViresh Kumar 14369c8f1ee4SViresh Kumar if (cpus > 1) 14379c8f1ee4SViresh Kumar cpumask_clear_cpu(cpu, policy->cpus); 1438ad7722daSviresh kumar up_write(&policy->rwsem); 1439cedb70afSSrivatsa S. Bhat 1440b8eed8afSViresh Kumar /* If cpu is last user of policy, free policy */ 1441b8eed8afSViresh Kumar if (cpus == 1) { 14429c0ebcf7SViresh Kumar if (has_target()) { 14433de9bdebSViresh Kumar ret = __cpufreq_governor(policy, 14443de9bdebSViresh Kumar CPUFREQ_GOV_POLICY_EXIT); 14453de9bdebSViresh Kumar if (ret) { 14463de9bdebSViresh Kumar pr_err("%s: Failed to exit governor\n", 14473de9bdebSViresh Kumar __func__); 14483de9bdebSViresh Kumar return ret; 14493de9bdebSViresh Kumar } 14503de9bdebSViresh Kumar } 14512a998599SRafael J. Wysocki 145296bbbe4aSViresh Kumar if (!cpufreq_suspended) 145342f921a6SViresh Kumar cpufreq_policy_put_kobj(policy); 14541da177e4SLinus Torvalds 14558414809cSSrivatsa S. Bhat /* 14568414809cSSrivatsa S. Bhat * Perform the ->exit() even during light-weight tear-down, 14578414809cSSrivatsa S. Bhat * since this is a core component, and is essential for the 14588414809cSSrivatsa S. Bhat * subsequent light-weight ->init() to succeed. 14598414809cSSrivatsa S. Bhat */ 14601c3d85ddSRafael J. Wysocki if (cpufreq_driver->exit) 14613a3e9e06SViresh Kumar cpufreq_driver->exit(policy); 146227ecddc2SJacob Shin 14639515f4d6SViresh Kumar /* Remove policy from list of active policies */ 14649515f4d6SViresh Kumar write_lock_irqsave(&cpufreq_driver_lock, flags); 14659515f4d6SViresh Kumar list_del(&policy->policy_list); 14669515f4d6SViresh Kumar write_unlock_irqrestore(&cpufreq_driver_lock, flags); 14679515f4d6SViresh Kumar 146896bbbe4aSViresh Kumar if (!cpufreq_suspended) 14693a3e9e06SViresh Kumar cpufreq_policy_free(policy); 1470e5c87b76SStratos Karafotis } else if (has_target()) { 1471e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1472e5c87b76SStratos Karafotis if (!ret) 1473e5c87b76SStratos Karafotis ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1474e5c87b76SStratos Karafotis 1475e5c87b76SStratos Karafotis if (ret) { 1476e5c87b76SStratos Karafotis pr_err("%s: Failed to start governor\n", __func__); 14773de9bdebSViresh Kumar return ret; 14783de9bdebSViresh Kumar } 1479b8eed8afSViresh Kumar } 14801da177e4SLinus Torvalds 14811da177e4SLinus Torvalds return 0; 14821da177e4SLinus Torvalds } 14831da177e4SLinus Torvalds 1484cedb70afSSrivatsa S. Bhat /** 148527a862e9SViresh Kumar * cpufreq_remove_dev - remove a CPU device 1486cedb70afSSrivatsa S. Bhat * 1487cedb70afSSrivatsa S. Bhat * Removes the cpufreq interface for a CPU device. 1488cedb70afSSrivatsa S. 
Bhat */ 14898a25a2fdSKay Sievers static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 14905a01f2e8SVenkatesh Pallipadi { 14918a25a2fdSKay Sievers unsigned int cpu = dev->id; 149227a862e9SViresh Kumar int ret; 1493ec28297aSVenki Pallipadi 1494ec28297aSVenki Pallipadi if (cpu_is_offline(cpu)) 1495ec28297aSVenki Pallipadi return 0; 1496ec28297aSVenki Pallipadi 149796bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_prepare(dev, sif); 149827a862e9SViresh Kumar 149927a862e9SViresh Kumar if (!ret) 150096bbbe4aSViresh Kumar ret = __cpufreq_remove_dev_finish(dev, sif); 150127a862e9SViresh Kumar 150227a862e9SViresh Kumar return ret; 15035a01f2e8SVenkatesh Pallipadi } 15045a01f2e8SVenkatesh Pallipadi 150565f27f38SDavid Howells static void handle_update(struct work_struct *work) 15061da177e4SLinus Torvalds { 150765f27f38SDavid Howells struct cpufreq_policy *policy = 150865f27f38SDavid Howells container_of(work, struct cpufreq_policy, update); 150965f27f38SDavid Howells unsigned int cpu = policy->cpu; 15102d06d8c4SDominik Brodowski pr_debug("handle_update for cpu %u called\n", cpu); 15111da177e4SLinus Torvalds cpufreq_update_policy(cpu); 15121da177e4SLinus Torvalds } 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds /** 1515bb176f7dSViresh Kumar * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1516bb176f7dSViresh Kumar * in deep trouble. 1517a1e1dc41SViresh Kumar * @policy: policy managing CPUs 15181da177e4SLinus Torvalds * @new_freq: CPU frequency the CPU actually runs at 15191da177e4SLinus Torvalds * 152029464f28SDave Jones * We adjust to current frequency first, and need to clean up later. 152129464f28SDave Jones * So either call to cpufreq_update_policy() or schedule handle_update()). 15221da177e4SLinus Torvalds */ 1523a1e1dc41SViresh Kumar static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1524e08f5f5bSGautham R Shenoy unsigned int new_freq) 15251da177e4SLinus Torvalds { 15261da177e4SLinus Torvalds struct cpufreq_freqs freqs; 1527b43a7ffbSViresh Kumar 1528e837f9b5SJoe Perches pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1529a1e1dc41SViresh Kumar policy->cur, new_freq); 15301da177e4SLinus Torvalds 1531a1e1dc41SViresh Kumar freqs.old = policy->cur; 15321da177e4SLinus Torvalds freqs.new = new_freq; 1533b43a7ffbSViresh Kumar 15348fec051eSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 15358fec051eSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds 15381da177e4SLinus Torvalds /** 15394ab70df4SDhaval Giani * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 154095235ca2SVenkatesh Pallipadi * @cpu: CPU number 154195235ca2SVenkatesh Pallipadi * 154295235ca2SVenkatesh Pallipadi * This is the last known freq, without actually getting it from the driver. 154395235ca2SVenkatesh Pallipadi * Return value will be same as what is shown in scaling_cur_freq in sysfs. 154495235ca2SVenkatesh Pallipadi */ 154595235ca2SVenkatesh Pallipadi unsigned int cpufreq_quick_get(unsigned int cpu) 154695235ca2SVenkatesh Pallipadi { 15479e21ba8bSDirk Brandewie struct cpufreq_policy *policy; 1548e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 154995235ca2SVenkatesh Pallipadi 15501c3d85ddSRafael J. Wysocki if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 15511c3d85ddSRafael J. 
Wysocki return cpufreq_driver->get(cpu); 15529e21ba8bSDirk Brandewie 15539e21ba8bSDirk Brandewie policy = cpufreq_cpu_get(cpu); 155495235ca2SVenkatesh Pallipadi if (policy) { 1555e08f5f5bSGautham R Shenoy ret_freq = policy->cur; 155695235ca2SVenkatesh Pallipadi cpufreq_cpu_put(policy); 155795235ca2SVenkatesh Pallipadi } 155895235ca2SVenkatesh Pallipadi 15594d34a67dSDave Jones return ret_freq; 156095235ca2SVenkatesh Pallipadi } 156195235ca2SVenkatesh Pallipadi EXPORT_SYMBOL(cpufreq_quick_get); 156295235ca2SVenkatesh Pallipadi 15633d737108SJesse Barnes /** 15643d737108SJesse Barnes * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 15653d737108SJesse Barnes * @cpu: CPU number 15663d737108SJesse Barnes * 15673d737108SJesse Barnes * Just return the max possible frequency for a given CPU. 15683d737108SJesse Barnes */ 15693d737108SJesse Barnes unsigned int cpufreq_quick_get_max(unsigned int cpu) 15703d737108SJesse Barnes { 15713d737108SJesse Barnes struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 15723d737108SJesse Barnes unsigned int ret_freq = 0; 15733d737108SJesse Barnes 15743d737108SJesse Barnes if (policy) { 15753d737108SJesse Barnes ret_freq = policy->max; 15763d737108SJesse Barnes cpufreq_cpu_put(policy); 15773d737108SJesse Barnes } 15783d737108SJesse Barnes 15793d737108SJesse Barnes return ret_freq; 15803d737108SJesse Barnes } 15813d737108SJesse Barnes EXPORT_SYMBOL(cpufreq_quick_get_max); 15823d737108SJesse Barnes 1583d92d50a4SViresh Kumar static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 15841da177e4SLinus Torvalds { 1585e08f5f5bSGautham R Shenoy unsigned int ret_freq = 0; 15861da177e4SLinus Torvalds 15871c3d85ddSRafael J. Wysocki if (!cpufreq_driver->get) 15884d34a67dSDave Jones return ret_freq; 15891da177e4SLinus Torvalds 1590d92d50a4SViresh Kumar ret_freq = cpufreq_driver->get(policy->cpu); 15911da177e4SLinus Torvalds 1592e08f5f5bSGautham R Shenoy if (ret_freq && policy->cur && 15931c3d85ddSRafael J. 
Wysocki !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1594e08f5f5bSGautham R Shenoy /* verify no discrepancy between actual and 1595e08f5f5bSGautham R Shenoy saved value exists */ 1596e08f5f5bSGautham R Shenoy if (unlikely(ret_freq != policy->cur)) { 1597a1e1dc41SViresh Kumar cpufreq_out_of_sync(policy, ret_freq); 15981da177e4SLinus Torvalds schedule_work(&policy->update); 15991da177e4SLinus Torvalds } 16001da177e4SLinus Torvalds } 16011da177e4SLinus Torvalds 16024d34a67dSDave Jones return ret_freq; 16035a01f2e8SVenkatesh Pallipadi } 16041da177e4SLinus Torvalds 16055a01f2e8SVenkatesh Pallipadi /** 16065a01f2e8SVenkatesh Pallipadi * cpufreq_get - get the current CPU frequency (in kHz) 16075a01f2e8SVenkatesh Pallipadi * @cpu: CPU number 16085a01f2e8SVenkatesh Pallipadi * 16095a01f2e8SVenkatesh Pallipadi * Get the CPU current (static) CPU frequency 16105a01f2e8SVenkatesh Pallipadi */ 16115a01f2e8SVenkatesh Pallipadi unsigned int cpufreq_get(unsigned int cpu) 16125a01f2e8SVenkatesh Pallipadi { 1613999976e0SAaron Plattner struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 16145a01f2e8SVenkatesh Pallipadi unsigned int ret_freq = 0; 16155a01f2e8SVenkatesh Pallipadi 1616999976e0SAaron Plattner if (policy) { 1617ad7722daSviresh kumar down_read(&policy->rwsem); 1618d92d50a4SViresh Kumar ret_freq = __cpufreq_get(policy); 1619ad7722daSviresh kumar up_read(&policy->rwsem); 1620999976e0SAaron Plattner 1621999976e0SAaron Plattner cpufreq_cpu_put(policy); 1622999976e0SAaron Plattner } 16236eed9404SViresh Kumar 16244d34a67dSDave Jones return ret_freq; 16251da177e4SLinus Torvalds } 16261da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get); 16271da177e4SLinus Torvalds 16288a25a2fdSKay Sievers static struct subsys_interface cpufreq_interface = { 16298a25a2fdSKay Sievers .name = "cpufreq", 16308a25a2fdSKay Sievers .subsys = &cpu_subsys, 16318a25a2fdSKay Sievers .add_dev = cpufreq_add_dev, 16328a25a2fdSKay Sievers .remove_dev = cpufreq_remove_dev, 1633e00e56dfSRafael J. Wysocki }; 1634e00e56dfSRafael J. Wysocki 1635e28867eaSViresh Kumar /* 1636e28867eaSViresh Kumar * In case platform wants some specific frequency to be configured 1637e28867eaSViresh Kumar * during suspend.. 163842d4dc3fSBenjamin Herrenschmidt */ 1639e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy) 164042d4dc3fSBenjamin Herrenschmidt { 1641e28867eaSViresh Kumar int ret; 16424bc5d341SDave Jones 1643e28867eaSViresh Kumar if (!policy->suspend_freq) { 1644e28867eaSViresh Kumar pr_err("%s: suspend_freq can't be zero\n", __func__); 1645e28867eaSViresh Kumar return -EINVAL; 164642d4dc3fSBenjamin Herrenschmidt } 164742d4dc3fSBenjamin Herrenschmidt 1648e28867eaSViresh Kumar pr_debug("%s: Setting suspend-freq: %u\n", __func__, 1649e28867eaSViresh Kumar policy->suspend_freq); 1650e28867eaSViresh Kumar 1651e28867eaSViresh Kumar ret = __cpufreq_driver_target(policy, policy->suspend_freq, 1652e28867eaSViresh Kumar CPUFREQ_RELATION_H); 1653e28867eaSViresh Kumar if (ret) 1654e28867eaSViresh Kumar pr_err("%s: unable to set suspend-freq: %u. 
err: %d\n", 1655e28867eaSViresh Kumar __func__, policy->suspend_freq, ret); 1656e28867eaSViresh Kumar 1657c9060494SDave Jones return ret; 165842d4dc3fSBenjamin Herrenschmidt } 1659e28867eaSViresh Kumar EXPORT_SYMBOL(cpufreq_generic_suspend); 166042d4dc3fSBenjamin Herrenschmidt 166142d4dc3fSBenjamin Herrenschmidt /** 16622f0aea93SViresh Kumar * cpufreq_suspend() - Suspend CPUFreq governors 16631da177e4SLinus Torvalds * 16642f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycles for suspending governors 16652f0aea93SViresh Kumar * as some platforms can't change frequency after this point in suspend cycle. 16662f0aea93SViresh Kumar * Because some of the devices (like: i2c, regulators, etc) they use for 16672f0aea93SViresh Kumar * changing frequency are suspended quickly after this point. 16681da177e4SLinus Torvalds */ 16692f0aea93SViresh Kumar void cpufreq_suspend(void) 16701da177e4SLinus Torvalds { 16713a3e9e06SViresh Kumar struct cpufreq_policy *policy; 16721da177e4SLinus Torvalds 16732f0aea93SViresh Kumar if (!cpufreq_driver) 1674e00e56dfSRafael J. Wysocki return; 16751da177e4SLinus Torvalds 16762f0aea93SViresh Kumar if (!has_target()) 1677b1b12babSViresh Kumar goto suspend; 16781da177e4SLinus Torvalds 16792f0aea93SViresh Kumar pr_debug("%s: Suspending Governors\n", __func__); 16802f0aea93SViresh Kumar 1681b4f0676fSViresh Kumar for_each_policy(policy) { 16822f0aea93SViresh Kumar if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 16832f0aea93SViresh Kumar pr_err("%s: Failed to stop governor for policy: %p\n", 16842f0aea93SViresh Kumar __func__, policy); 16852f0aea93SViresh Kumar else if (cpufreq_driver->suspend 16862f0aea93SViresh Kumar && cpufreq_driver->suspend(policy)) 16872f0aea93SViresh Kumar pr_err("%s: Failed to suspend driver: %p\n", __func__, 16882f0aea93SViresh Kumar policy); 16891da177e4SLinus Torvalds } 1690b1b12babSViresh Kumar 1691b1b12babSViresh Kumar suspend: 1692b1b12babSViresh Kumar cpufreq_suspended = true; 16931da177e4SLinus Torvalds } 16941da177e4SLinus Torvalds 16951da177e4SLinus Torvalds /** 16962f0aea93SViresh Kumar * cpufreq_resume() - Resume CPUFreq governors 16971da177e4SLinus Torvalds * 16982f0aea93SViresh Kumar * Called during system wide Suspend/Hibernate cycle for resuming governors that 16992f0aea93SViresh Kumar * are suspended with cpufreq_suspend(). 
17001da177e4SLinus Torvalds */ 17012f0aea93SViresh Kumar void cpufreq_resume(void) 17021da177e4SLinus Torvalds { 17031da177e4SLinus Torvalds struct cpufreq_policy *policy; 17041da177e4SLinus Torvalds 17052f0aea93SViresh Kumar if (!cpufreq_driver) 17061da177e4SLinus Torvalds return; 17071da177e4SLinus Torvalds 17088e30444eSLan Tianyu cpufreq_suspended = false; 17098e30444eSLan Tianyu 17102f0aea93SViresh Kumar if (!has_target()) 17112f0aea93SViresh Kumar return; 17121da177e4SLinus Torvalds 17132f0aea93SViresh Kumar pr_debug("%s: Resuming Governors\n", __func__); 17142f0aea93SViresh Kumar 1715b4f0676fSViresh Kumar for_each_policy(policy) { 17160c5aa405SViresh Kumar if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 17170c5aa405SViresh Kumar pr_err("%s: Failed to resume driver: %p\n", __func__, 17180c5aa405SViresh Kumar policy); 17190c5aa405SViresh Kumar else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) 17202f0aea93SViresh Kumar || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) 17212f0aea93SViresh Kumar pr_err("%s: Failed to start governor for policy: %p\n", 17222f0aea93SViresh Kumar __func__, policy); 1723c75de0acSViresh Kumar } 17242f0aea93SViresh Kumar 17252f0aea93SViresh Kumar /* 1726c75de0acSViresh Kumar * schedule call cpufreq_update_policy() for first-online CPU, as that 1727c75de0acSViresh Kumar * wouldn't be hotplugged-out on suspend. It will verify that the 1728c75de0acSViresh Kumar * current freq is in sync with what we believe it to be. 17292f0aea93SViresh Kumar */ 1730c75de0acSViresh Kumar policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); 1731c75de0acSViresh Kumar if (WARN_ON(!policy)) 1732c75de0acSViresh Kumar return; 1733c75de0acSViresh Kumar 17343a3e9e06SViresh Kumar schedule_work(&policy->update); 17351da177e4SLinus Torvalds } 17361da177e4SLinus Torvalds 17379d95046eSBorislav Petkov /** 17389d95046eSBorislav Petkov * cpufreq_get_current_driver - return current driver's name 17399d95046eSBorislav Petkov * 17409d95046eSBorislav Petkov * Return the name string of the currently loaded cpufreq driver 17419d95046eSBorislav Petkov * or NULL, if none. 17429d95046eSBorislav Petkov */ 17439d95046eSBorislav Petkov const char *cpufreq_get_current_driver(void) 17449d95046eSBorislav Petkov { 17451c3d85ddSRafael J. Wysocki if (cpufreq_driver) 17461c3d85ddSRafael J. Wysocki return cpufreq_driver->name; 17471c3d85ddSRafael J. Wysocki 17481c3d85ddSRafael J. Wysocki return NULL; 17499d95046eSBorislav Petkov } 17509d95046eSBorislav Petkov EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 17511da177e4SLinus Torvalds 175251315cdfSThomas Petazzoni /** 175351315cdfSThomas Petazzoni * cpufreq_get_driver_data - return current driver data 175451315cdfSThomas Petazzoni * 175551315cdfSThomas Petazzoni * Return the private data of the currently loaded cpufreq 175651315cdfSThomas Petazzoni * driver, or NULL if no cpufreq driver is loaded. 
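 * Illustrative sketch (the "mydrv" names are hypothetical): a driver can hang
 * its own context off the registration instead of using a global, e.g.
 *
 *	static struct cpufreq_driver mydrv_cpufreq_driver = {
 *		...
 *		.driver_data	= &mydrv_context,
 *	};
 *
 * and code that only has access to the cpufreq core can fetch it back with
 * cpufreq_get_driver_data().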
175751315cdfSThomas Petazzoni */ 175851315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void) 175951315cdfSThomas Petazzoni { 176051315cdfSThomas Petazzoni if (cpufreq_driver) 176151315cdfSThomas Petazzoni return cpufreq_driver->driver_data; 176251315cdfSThomas Petazzoni 176351315cdfSThomas Petazzoni return NULL; 176451315cdfSThomas Petazzoni } 176551315cdfSThomas Petazzoni EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 176651315cdfSThomas Petazzoni 17671da177e4SLinus Torvalds /********************************************************************* 17681da177e4SLinus Torvalds * NOTIFIER LISTS INTERFACE * 17691da177e4SLinus Torvalds *********************************************************************/ 17701da177e4SLinus Torvalds 17711da177e4SLinus Torvalds /** 17721da177e4SLinus Torvalds * cpufreq_register_notifier - register a driver with cpufreq 17731da177e4SLinus Torvalds * @nb: notifier function to register 17741da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 17751da177e4SLinus Torvalds * 17761da177e4SLinus Torvalds * Add a driver to one of two lists: either a list of drivers that 17771da177e4SLinus Torvalds * are notified about clock rate changes (once before and once after 17781da177e4SLinus Torvalds * the transition), or a list of drivers that are notified about 17791da177e4SLinus Torvalds * changes in cpufreq policy. 17801da177e4SLinus Torvalds * 17811da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1782e041c683SAlan Stern * blocking_notifier_chain_register. 17831da177e4SLinus Torvalds */ 17841da177e4SLinus Torvalds int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 17851da177e4SLinus Torvalds { 17861da177e4SLinus Torvalds int ret; 17871da177e4SLinus Torvalds 1788d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1789d5aaffa9SDirk Brandewie return -EINVAL; 1790d5aaffa9SDirk Brandewie 179174212ca4SCesar Eduardo Barros WARN_ON(!init_cpufreq_transition_notifier_list_called); 179274212ca4SCesar Eduardo Barros 17931da177e4SLinus Torvalds switch (list) { 17941da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1795b4dfdbb3SAlan Stern ret = srcu_notifier_chain_register( 1796e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 17971da177e4SLinus Torvalds break; 17981da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1799e041c683SAlan Stern ret = blocking_notifier_chain_register( 1800e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18011da177e4SLinus Torvalds break; 18021da177e4SLinus Torvalds default: 18031da177e4SLinus Torvalds ret = -EINVAL; 18041da177e4SLinus Torvalds } 18051da177e4SLinus Torvalds 18061da177e4SLinus Torvalds return ret; 18071da177e4SLinus Torvalds } 18081da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_register_notifier); 18091da177e4SLinus Torvalds 18101da177e4SLinus Torvalds /** 18111da177e4SLinus Torvalds * cpufreq_unregister_notifier - unregister a driver with cpufreq 18121da177e4SLinus Torvalds * @nb: notifier block to be unregistered 18131da177e4SLinus Torvalds * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 18141da177e4SLinus Torvalds * 18151da177e4SLinus Torvalds * Remove a driver from the CPU frequency notifier list. 18161da177e4SLinus Torvalds * 18171da177e4SLinus Torvalds * This function may sleep, and has the same return conditions as 1818e041c683SAlan Stern * blocking_notifier_chain_unregister. 
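 * Example (illustrative sketch, not from this file; the "mydev" names are
 * made up): pairing this with cpufreq_register_notifier() for a transition
 * notifier might look like
 *
 *	static int mydev_freq_notify(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			mydev_rescale(freqs->cpu, freqs->new); /* hypothetical helper */
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block mydev_freq_nb = {
 *		.notifier_call = mydev_freq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&mydev_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	...
 *	cpufreq_unregister_notifier(&mydev_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);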
18191da177e4SLinus Torvalds */ 18201da177e4SLinus Torvalds int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 18211da177e4SLinus Torvalds { 18221da177e4SLinus Torvalds int ret; 18231da177e4SLinus Torvalds 1824d5aaffa9SDirk Brandewie if (cpufreq_disabled()) 1825d5aaffa9SDirk Brandewie return -EINVAL; 1826d5aaffa9SDirk Brandewie 18271da177e4SLinus Torvalds switch (list) { 18281da177e4SLinus Torvalds case CPUFREQ_TRANSITION_NOTIFIER: 1829b4dfdbb3SAlan Stern ret = srcu_notifier_chain_unregister( 1830e041c683SAlan Stern &cpufreq_transition_notifier_list, nb); 18311da177e4SLinus Torvalds break; 18321da177e4SLinus Torvalds case CPUFREQ_POLICY_NOTIFIER: 1833e041c683SAlan Stern ret = blocking_notifier_chain_unregister( 1834e041c683SAlan Stern &cpufreq_policy_notifier_list, nb); 18351da177e4SLinus Torvalds break; 18361da177e4SLinus Torvalds default: 18371da177e4SLinus Torvalds ret = -EINVAL; 18381da177e4SLinus Torvalds } 18391da177e4SLinus Torvalds 18401da177e4SLinus Torvalds return ret; 18411da177e4SLinus Torvalds } 18421da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_unregister_notifier); 18431da177e4SLinus Torvalds 18441da177e4SLinus Torvalds 18451da177e4SLinus Torvalds /********************************************************************* 18461da177e4SLinus Torvalds * GOVERNORS * 18471da177e4SLinus Torvalds *********************************************************************/ 18481da177e4SLinus Torvalds 18491c03a2d0SViresh Kumar /* Must set freqs->new to intermediate frequency */ 18501c03a2d0SViresh Kumar static int __target_intermediate(struct cpufreq_policy *policy, 18511c03a2d0SViresh Kumar struct cpufreq_freqs *freqs, int index) 18521c03a2d0SViresh Kumar { 18531c03a2d0SViresh Kumar int ret; 18541c03a2d0SViresh Kumar 18551c03a2d0SViresh Kumar freqs->new = cpufreq_driver->get_intermediate(policy, index); 18561c03a2d0SViresh Kumar 18571c03a2d0SViresh Kumar /* We don't need to switch to intermediate freq */ 18581c03a2d0SViresh Kumar if (!freqs->new) 18591c03a2d0SViresh Kumar return 0; 18601c03a2d0SViresh Kumar 18611c03a2d0SViresh Kumar pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 18621c03a2d0SViresh Kumar __func__, policy->cpu, freqs->old, freqs->new); 18631c03a2d0SViresh Kumar 18641c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, freqs); 18651c03a2d0SViresh Kumar ret = cpufreq_driver->target_intermediate(policy, index); 18661c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, freqs, ret); 18671c03a2d0SViresh Kumar 18681c03a2d0SViresh Kumar if (ret) 18691c03a2d0SViresh Kumar pr_err("%s: Failed to change to intermediate frequency: %d\n", 18701c03a2d0SViresh Kumar __func__, ret); 18711c03a2d0SViresh Kumar 18721c03a2d0SViresh Kumar return ret; 18731c03a2d0SViresh Kumar } 18741c03a2d0SViresh Kumar 18758d65775dSViresh Kumar static int __target_index(struct cpufreq_policy *policy, 18768d65775dSViresh Kumar struct cpufreq_frequency_table *freq_table, int index) 18778d65775dSViresh Kumar { 18781c03a2d0SViresh Kumar struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 18791c03a2d0SViresh Kumar unsigned int intermediate_freq = 0; 18808d65775dSViresh Kumar int retval = -EINVAL; 18818d65775dSViresh Kumar bool notify; 18828d65775dSViresh Kumar 18838d65775dSViresh Kumar notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 18848d65775dSViresh Kumar if (notify) { 18851c03a2d0SViresh Kumar /* Handle switching to intermediate frequency */ 18861c03a2d0SViresh Kumar if (cpufreq_driver->get_intermediate) { 
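			/*
			 * __target_intermediate() issues its own
			 * PRECHANGE/POSTCHANGE pair for this hop and leaves
			 * freqs.new at the intermediate rate, or at 0 if the
			 * driver decided no intermediate step is needed for
			 * this index.
			 */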
18871c03a2d0SViresh Kumar retval = __target_intermediate(policy, &freqs, index); 18881c03a2d0SViresh Kumar if (retval) 18891c03a2d0SViresh Kumar return retval; 18908d65775dSViresh Kumar 18911c03a2d0SViresh Kumar intermediate_freq = freqs.new; 18921c03a2d0SViresh Kumar /* Set old freq to intermediate */ 18931c03a2d0SViresh Kumar if (intermediate_freq) 18941c03a2d0SViresh Kumar freqs.old = freqs.new; 18951c03a2d0SViresh Kumar } 18961c03a2d0SViresh Kumar 18971c03a2d0SViresh Kumar freqs.new = freq_table[index].frequency; 18988d65775dSViresh Kumar pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 18998d65775dSViresh Kumar __func__, policy->cpu, freqs.old, freqs.new); 19008d65775dSViresh Kumar 19018d65775dSViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19028d65775dSViresh Kumar } 19038d65775dSViresh Kumar 19048d65775dSViresh Kumar retval = cpufreq_driver->target_index(policy, index); 19058d65775dSViresh Kumar if (retval) 19068d65775dSViresh Kumar pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 19078d65775dSViresh Kumar retval); 19088d65775dSViresh Kumar 19091c03a2d0SViresh Kumar if (notify) { 19108d65775dSViresh Kumar cpufreq_freq_transition_end(policy, &freqs, retval); 19118d65775dSViresh Kumar 19121c03a2d0SViresh Kumar /* 19131c03a2d0SViresh Kumar * Failed after switching to the intermediate freq? The driver should have 19141c03a2d0SViresh Kumar * reverted to the initial frequency and so should we. Check 19151c03a2d0SViresh Kumar * here for intermediate_freq instead of get_intermediate, in 19161c03a2d0SViresh Kumar * case we haven't switched to the intermediate freq at all. 19171c03a2d0SViresh Kumar */ 19181c03a2d0SViresh Kumar if (unlikely(retval && intermediate_freq)) { 19191c03a2d0SViresh Kumar freqs.old = intermediate_freq; 19201c03a2d0SViresh Kumar freqs.new = policy->restore_freq; 19211c03a2d0SViresh Kumar cpufreq_freq_transition_begin(policy, &freqs); 19221c03a2d0SViresh Kumar cpufreq_freq_transition_end(policy, &freqs, 0); 19231c03a2d0SViresh Kumar } 19241c03a2d0SViresh Kumar } 19251c03a2d0SViresh Kumar 19268d65775dSViresh Kumar return retval; 19278d65775dSViresh Kumar } 19288d65775dSViresh Kumar 19291da177e4SLinus Torvalds int __cpufreq_driver_target(struct cpufreq_policy *policy, 19301da177e4SLinus Torvalds unsigned int target_freq, 19311da177e4SLinus Torvalds unsigned int relation) 19321da177e4SLinus Torvalds { 19337249924eSViresh Kumar unsigned int old_target_freq = target_freq; 19348d65775dSViresh Kumar int retval = -EINVAL; 1935c32b6b8eSAshok Raj 1936a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 1937a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 1938a7b422cdSKonrad Rzeszutek Wilk 19397249924eSViresh Kumar /* Make sure that target_freq is within supported range */ 19407249924eSViresh Kumar if (target_freq > policy->max) 19417249924eSViresh Kumar target_freq = policy->max; 19427249924eSViresh Kumar if (target_freq < policy->min) 19437249924eSViresh Kumar target_freq = policy->min; 19447249924eSViresh Kumar 19457249924eSViresh Kumar pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 19467249924eSViresh Kumar policy->cpu, target_freq, relation, old_target_freq); 19475a1c0228SViresh Kumar 19489c0ebcf7SViresh Kumar /* 19499c0ebcf7SViresh Kumar * This might look like a redundant check, as we test the frequency again 19509c0ebcf7SViresh Kumar * after finding the index. But it is left here intentionally for cases where 19519c0ebcf7SViresh Kumar * exactly the same freq is requested again, so that we can save a few function 19529c0ebcf7SViresh Kumar * calls.
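 * For example, if the governor asks for 1200000 kHz while policy->cur is
 * already 1200000 kHz, we return immediately and skip the table lookup below.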
19539c0ebcf7SViresh Kumar */ 19545a1c0228SViresh Kumar if (target_freq == policy->cur) 19555a1c0228SViresh Kumar return 0; 19565a1c0228SViresh Kumar 19571c03a2d0SViresh Kumar /* Save last value to restore later on errors */ 19581c03a2d0SViresh Kumar policy->restore_freq = policy->cur; 19591c03a2d0SViresh Kumar 19601c3d85ddSRafael J. Wysocki if (cpufreq_driver->target) 19611c3d85ddSRafael J. Wysocki retval = cpufreq_driver->target(policy, target_freq, relation); 19629c0ebcf7SViresh Kumar else if (cpufreq_driver->target_index) { 19639c0ebcf7SViresh Kumar struct cpufreq_frequency_table *freq_table; 19649c0ebcf7SViresh Kumar int index; 196590d45d17SAshok Raj 19669c0ebcf7SViresh Kumar freq_table = cpufreq_frequency_get_table(policy->cpu); 19679c0ebcf7SViresh Kumar if (unlikely(!freq_table)) { 19689c0ebcf7SViresh Kumar pr_err("%s: Unable to find freq_table\n", __func__); 19699c0ebcf7SViresh Kumar goto out; 19709c0ebcf7SViresh Kumar } 19719c0ebcf7SViresh Kumar 19729c0ebcf7SViresh Kumar retval = cpufreq_frequency_table_target(policy, freq_table, 19739c0ebcf7SViresh Kumar target_freq, relation, &index); 19749c0ebcf7SViresh Kumar if (unlikely(retval)) { 19759c0ebcf7SViresh Kumar pr_err("%s: Unable to find matching freq\n", __func__); 19769c0ebcf7SViresh Kumar goto out; 19779c0ebcf7SViresh Kumar } 19789c0ebcf7SViresh Kumar 1979d4019f0aSViresh Kumar if (freq_table[index].frequency == policy->cur) { 19809c0ebcf7SViresh Kumar retval = 0; 1981d4019f0aSViresh Kumar goto out; 1982d4019f0aSViresh Kumar } 1983d4019f0aSViresh Kumar 19848d65775dSViresh Kumar retval = __target_index(policy, freq_table, index); 19859c0ebcf7SViresh Kumar } 19869c0ebcf7SViresh Kumar 19879c0ebcf7SViresh Kumar out: 19881da177e4SLinus Torvalds return retval; 19891da177e4SLinus Torvalds } 19901da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 19911da177e4SLinus Torvalds 19921da177e4SLinus Torvalds int cpufreq_driver_target(struct cpufreq_policy *policy, 19931da177e4SLinus Torvalds unsigned int target_freq, 19941da177e4SLinus Torvalds unsigned int relation) 19951da177e4SLinus Torvalds { 1996f1829e4aSJulia Lawall int ret = -EINVAL; 19971da177e4SLinus Torvalds 1998ad7722daSviresh kumar down_write(&policy->rwsem); 19991da177e4SLinus Torvalds 20001da177e4SLinus Torvalds ret = __cpufreq_driver_target(policy, target_freq, relation); 20011da177e4SLinus Torvalds 2002ad7722daSviresh kumar up_write(&policy->rwsem); 20031da177e4SLinus Torvalds 20041da177e4SLinus Torvalds return ret; 20051da177e4SLinus Torvalds } 20061da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_driver_target); 20071da177e4SLinus Torvalds 2008e08f5f5bSGautham R Shenoy static int __cpufreq_governor(struct cpufreq_policy *policy, 2009e08f5f5bSGautham R Shenoy unsigned int event) 20101da177e4SLinus Torvalds { 2011cc993cabSDave Jones int ret; 20126afde10cSThomas Renninger 20136afde10cSThomas Renninger /* Only must be defined when default governor is known to have latency 20146afde10cSThomas Renninger restrictions, like e.g. conservative or ondemand. 
20156afde10cSThomas Renninger That this is the case is already ensured in Kconfig 20166afde10cSThomas Renninger */ 20176afde10cSThomas Renninger #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 20186afde10cSThomas Renninger struct cpufreq_governor *gov = &cpufreq_gov_performance; 20196afde10cSThomas Renninger #else 20206afde10cSThomas Renninger struct cpufreq_governor *gov = NULL; 20216afde10cSThomas Renninger #endif 20221c256245SThomas Renninger 20232f0aea93SViresh Kumar /* Don't start any governor operations if we are entering suspend */ 20242f0aea93SViresh Kumar if (cpufreq_suspended) 20252f0aea93SViresh Kumar return 0; 2026cb57720bSEthan Zhao /* 2027cb57720bSEthan Zhao * Governor might not be initiated here if ACPI _PPC changed 2028cb57720bSEthan Zhao * notification happened, so check it. 2029cb57720bSEthan Zhao */ 2030cb57720bSEthan Zhao if (!policy->governor) 2031cb57720bSEthan Zhao return -EINVAL; 20322f0aea93SViresh Kumar 20331c256245SThomas Renninger if (policy->governor->max_transition_latency && 20341c256245SThomas Renninger policy->cpuinfo.transition_latency > 20351c256245SThomas Renninger policy->governor->max_transition_latency) { 20366afde10cSThomas Renninger if (!gov) 20376afde10cSThomas Renninger return -EINVAL; 20386afde10cSThomas Renninger else { 2039e837f9b5SJoe Perches pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", 2040e837f9b5SJoe Perches policy->governor->name, gov->name); 20411c256245SThomas Renninger policy->governor = gov; 20421c256245SThomas Renninger } 20436afde10cSThomas Renninger } 20441da177e4SLinus Torvalds 2045fe492f3fSViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20461da177e4SLinus Torvalds if (!try_module_get(policy->governor->owner)) 20471da177e4SLinus Torvalds return -EINVAL; 20481da177e4SLinus Torvalds 20492d06d8c4SDominik Brodowski pr_debug("__cpufreq_governor for CPU %u, event %u\n", 2050e08f5f5bSGautham R Shenoy policy->cpu, event); 205195731ebbSXiaoguang Chen 205295731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 205356d07db2SSrivatsa S. 
Bhat if ((policy->governor_enabled && event == CPUFREQ_GOV_START) 2054f73d3933SViresh Kumar || (!policy->governor_enabled 2055f73d3933SViresh Kumar && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) { 205695731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 205795731ebbSXiaoguang Chen return -EBUSY; 205895731ebbSXiaoguang Chen } 205995731ebbSXiaoguang Chen 206095731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 206195731ebbSXiaoguang Chen policy->governor_enabled = false; 206295731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 206395731ebbSXiaoguang Chen policy->governor_enabled = true; 206495731ebbSXiaoguang Chen 206595731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 206695731ebbSXiaoguang Chen 20671da177e4SLinus Torvalds ret = policy->governor->governor(policy, event); 20681da177e4SLinus Torvalds 20694d5dcc42SViresh Kumar if (!ret) { 20704d5dcc42SViresh Kumar if (event == CPUFREQ_GOV_POLICY_INIT) 20718e53695fSViresh Kumar policy->governor->initialized++; 20724d5dcc42SViresh Kumar else if (event == CPUFREQ_GOV_POLICY_EXIT) 20738e53695fSViresh Kumar policy->governor->initialized--; 207495731ebbSXiaoguang Chen } else { 207595731ebbSXiaoguang Chen /* Restore original values */ 207695731ebbSXiaoguang Chen mutex_lock(&cpufreq_governor_lock); 207795731ebbSXiaoguang Chen if (event == CPUFREQ_GOV_STOP) 207895731ebbSXiaoguang Chen policy->governor_enabled = true; 207995731ebbSXiaoguang Chen else if (event == CPUFREQ_GOV_START) 208095731ebbSXiaoguang Chen policy->governor_enabled = false; 208195731ebbSXiaoguang Chen mutex_unlock(&cpufreq_governor_lock); 20824d5dcc42SViresh Kumar } 2083b394058fSViresh Kumar 2084fe492f3fSViresh Kumar if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || 2085fe492f3fSViresh Kumar ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) 20861da177e4SLinus Torvalds module_put(policy->governor->owner); 20871da177e4SLinus Torvalds 20881da177e4SLinus Torvalds return ret; 20891da177e4SLinus Torvalds } 20901da177e4SLinus Torvalds 20911da177e4SLinus Torvalds int cpufreq_register_governor(struct cpufreq_governor *governor) 20921da177e4SLinus Torvalds { 20933bcb09a3SJeremy Fitzhardinge int err; 20941da177e4SLinus Torvalds 20951da177e4SLinus Torvalds if (!governor) 20961da177e4SLinus Torvalds return -EINVAL; 20971da177e4SLinus Torvalds 2098a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2099a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2100a7b422cdSKonrad Rzeszutek Wilk 21013fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21021da177e4SLinus Torvalds 2103b394058fSViresh Kumar governor->initialized = 0; 21043bcb09a3SJeremy Fitzhardinge err = -EBUSY; 210542f91fa1SViresh Kumar if (!find_governor(governor->name)) { 21063bcb09a3SJeremy Fitzhardinge err = 0; 21071da177e4SLinus Torvalds list_add(&governor->governor_list, &cpufreq_governor_list); 21083bcb09a3SJeremy Fitzhardinge } 21091da177e4SLinus Torvalds 21103fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21113bcb09a3SJeremy Fitzhardinge return err; 21121da177e4SLinus Torvalds } 21131da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_register_governor); 21141da177e4SLinus Torvalds 21151da177e4SLinus Torvalds void cpufreq_unregister_governor(struct cpufreq_governor *governor) 21161da177e4SLinus Torvalds { 211790e41bacSPrarit Bhargava int cpu; 211890e41bacSPrarit Bhargava 21191da177e4SLinus Torvalds if (!governor) 21201da177e4SLinus Torvalds return; 21211da177e4SLinus Torvalds 2122a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2123a7b422cdSKonrad Rzeszutek Wilk 
return; 2124a7b422cdSKonrad Rzeszutek Wilk 212590e41bacSPrarit Bhargava for_each_present_cpu(cpu) { 212690e41bacSPrarit Bhargava if (cpu_online(cpu)) 212790e41bacSPrarit Bhargava continue; 212890e41bacSPrarit Bhargava if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) 212990e41bacSPrarit Bhargava strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); 213090e41bacSPrarit Bhargava } 213190e41bacSPrarit Bhargava 21323fc54d37Sakpm@osdl.org mutex_lock(&cpufreq_governor_mutex); 21331da177e4SLinus Torvalds list_del(&governor->governor_list); 21343fc54d37Sakpm@osdl.org mutex_unlock(&cpufreq_governor_mutex); 21351da177e4SLinus Torvalds return; 21361da177e4SLinus Torvalds } 21371da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 21381da177e4SLinus Torvalds 21391da177e4SLinus Torvalds 21401da177e4SLinus Torvalds /********************************************************************* 21411da177e4SLinus Torvalds * POLICY INTERFACE * 21421da177e4SLinus Torvalds *********************************************************************/ 21431da177e4SLinus Torvalds 21441da177e4SLinus Torvalds /** 21451da177e4SLinus Torvalds * cpufreq_get_policy - get the current cpufreq_policy 214629464f28SDave Jones * @policy: struct cpufreq_policy into which the current cpufreq_policy 214729464f28SDave Jones * is written 21481da177e4SLinus Torvalds * 21491da177e4SLinus Torvalds * Reads the current cpufreq policy. 21501da177e4SLinus Torvalds */ 21511da177e4SLinus Torvalds int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 21521da177e4SLinus Torvalds { 21531da177e4SLinus Torvalds struct cpufreq_policy *cpu_policy; 21541da177e4SLinus Torvalds if (!policy) 21551da177e4SLinus Torvalds return -EINVAL; 21561da177e4SLinus Torvalds 21571da177e4SLinus Torvalds cpu_policy = cpufreq_cpu_get(cpu); 21581da177e4SLinus Torvalds if (!cpu_policy) 21591da177e4SLinus Torvalds return -EINVAL; 21601da177e4SLinus Torvalds 2161d5b73cd8SViresh Kumar memcpy(policy, cpu_policy, sizeof(*policy)); 21621da177e4SLinus Torvalds 21631da177e4SLinus Torvalds cpufreq_cpu_put(cpu_policy); 21641da177e4SLinus Torvalds return 0; 21651da177e4SLinus Torvalds } 21661da177e4SLinus Torvalds EXPORT_SYMBOL(cpufreq_get_policy); 21671da177e4SLinus Torvalds 2168153d7f3fSArjan van de Ven /* 2169037ce839SViresh Kumar * policy : current policy. 2170037ce839SViresh Kumar * new_policy: policy to be set. 2171153d7f3fSArjan van de Ven */ 2172037ce839SViresh Kumar static int cpufreq_set_policy(struct cpufreq_policy *policy, 21733a3e9e06SViresh Kumar struct cpufreq_policy *new_policy) 21741da177e4SLinus Torvalds { 2175d9a789c7SRafael J. Wysocki struct cpufreq_governor *old_gov; 2176d9a789c7SRafael J. Wysocki int ret; 21771da177e4SLinus Torvalds 2178e837f9b5SJoe Perches pr_debug("setting new policy for CPU %u: %u - %u kHz\n", 2179e837f9b5SJoe Perches new_policy->cpu, new_policy->min, new_policy->max); 21801da177e4SLinus Torvalds 2181d5b73cd8SViresh Kumar memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 21821da177e4SLinus Torvalds 2183d9a789c7SRafael J. Wysocki if (new_policy->min > policy->max || new_policy->max < policy->min) 2184d9a789c7SRafael J. Wysocki return -EINVAL; 21859c9a43edSMattia Dongili 21861da177e4SLinus Torvalds /* verify the cpu speed can be set within this limit */ 21873a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 21881da177e4SLinus Torvalds if (ret) 2189d9a789c7SRafael J. 
Wysocki return ret; 21901da177e4SLinus Torvalds 21911da177e4SLinus Torvalds /* adjust if necessary - all reasons */ 2192e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 21933a3e9e06SViresh Kumar CPUFREQ_ADJUST, new_policy); 21941da177e4SLinus Torvalds 21951da177e4SLinus Torvalds /* adjust if necessary - hardware incompatibility*/ 2196e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 21973a3e9e06SViresh Kumar CPUFREQ_INCOMPATIBLE, new_policy); 21981da177e4SLinus Torvalds 2199bb176f7dSViresh Kumar /* 2200bb176f7dSViresh Kumar * verify the cpu speed can be set within this limit, which might be 2201bb176f7dSViresh Kumar * different to the first one 2202bb176f7dSViresh Kumar */ 22033a3e9e06SViresh Kumar ret = cpufreq_driver->verify(new_policy); 2204e041c683SAlan Stern if (ret) 2205d9a789c7SRafael J. Wysocki return ret; 22061da177e4SLinus Torvalds 22071da177e4SLinus Torvalds /* notification of the new policy */ 2208e041c683SAlan Stern blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 22093a3e9e06SViresh Kumar CPUFREQ_NOTIFY, new_policy); 22101da177e4SLinus Torvalds 22113a3e9e06SViresh Kumar policy->min = new_policy->min; 22123a3e9e06SViresh Kumar policy->max = new_policy->max; 22131da177e4SLinus Torvalds 22142d06d8c4SDominik Brodowski pr_debug("new min and max freqs are %u - %u kHz\n", 22153a3e9e06SViresh Kumar policy->min, policy->max); 22161da177e4SLinus Torvalds 22171c3d85ddSRafael J. Wysocki if (cpufreq_driver->setpolicy) { 22183a3e9e06SViresh Kumar policy->policy = new_policy->policy; 22192d06d8c4SDominik Brodowski pr_debug("setting range\n"); 2220d9a789c7SRafael J. Wysocki return cpufreq_driver->setpolicy(new_policy); 2221d9a789c7SRafael J. Wysocki } 2222d9a789c7SRafael J. Wysocki 2223d9a789c7SRafael J. Wysocki if (new_policy->governor == policy->governor) 2224d9a789c7SRafael J. Wysocki goto out; 22251da177e4SLinus Torvalds 22262d06d8c4SDominik Brodowski pr_debug("governor switch\n"); 22271da177e4SLinus Torvalds 2228d9a789c7SRafael J. Wysocki /* save old, working values */ 2229d9a789c7SRafael J. Wysocki old_gov = policy->governor; 22301da177e4SLinus Torvalds /* end old governor */ 2231d9a789c7SRafael J. Wysocki if (old_gov) { 22323a3e9e06SViresh Kumar __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 2233ad7722daSviresh kumar up_write(&policy->rwsem); 2234d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2235ad7722daSviresh kumar down_write(&policy->rwsem); 22367bd353a9SViresh Kumar } 22371da177e4SLinus Torvalds 22381da177e4SLinus Torvalds /* start new governor */ 22393a3e9e06SViresh Kumar policy->governor = new_policy->governor; 22403a3e9e06SViresh Kumar if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { 2241d9a789c7SRafael J. Wysocki if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) 2242d9a789c7SRafael J. Wysocki goto out; 2243d9a789c7SRafael J. Wysocki 2244ad7722daSviresh kumar up_write(&policy->rwsem); 2245d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2246ad7722daSviresh kumar down_write(&policy->rwsem); 2247955ef483SViresh Kumar } 22487bd353a9SViresh Kumar 22491da177e4SLinus Torvalds /* new governor failed, so re-start old one */ 2250d9a789c7SRafael J. Wysocki pr_debug("starting governor %s failed\n", policy->governor->name); 22511da177e4SLinus Torvalds if (old_gov) { 22523a3e9e06SViresh Kumar policy->governor = old_gov; 2253d9a789c7SRafael J. Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 2254d9a789c7SRafael J. 
Wysocki __cpufreq_governor(policy, CPUFREQ_GOV_START); 22551da177e4SLinus Torvalds } 22561da177e4SLinus Torvalds 2257d9a789c7SRafael J. Wysocki return -EINVAL; 2258d9a789c7SRafael J. Wysocki 2259d9a789c7SRafael J. Wysocki out: 2260d9a789c7SRafael J. Wysocki pr_debug("governor: change or update limits\n"); 2261d9a789c7SRafael J. Wysocki return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 22621da177e4SLinus Torvalds } 22631da177e4SLinus Torvalds 22641da177e4SLinus Torvalds /** 22651da177e4SLinus Torvalds * cpufreq_update_policy - re-evaluate an existing cpufreq policy 22661da177e4SLinus Torvalds * @cpu: CPU which shall be re-evaluated 22671da177e4SLinus Torvalds * 226825985edcSLucas De Marchi * Useful for policy notifiers which have different necessities 22691da177e4SLinus Torvalds * at different times. 22701da177e4SLinus Torvalds */ 22711da177e4SLinus Torvalds int cpufreq_update_policy(unsigned int cpu) 22721da177e4SLinus Torvalds { 22733a3e9e06SViresh Kumar struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 22743a3e9e06SViresh Kumar struct cpufreq_policy new_policy; 2275f1829e4aSJulia Lawall int ret; 22761da177e4SLinus Torvalds 2277fefa8ff8SAaron Plattner if (!policy) 2278fefa8ff8SAaron Plattner return -ENODEV; 22791da177e4SLinus Torvalds 2280ad7722daSviresh kumar down_write(&policy->rwsem); 22811da177e4SLinus Torvalds 22822d06d8c4SDominik Brodowski pr_debug("updating policy for CPU %u\n", cpu); 2283d5b73cd8SViresh Kumar memcpy(&new_policy, policy, sizeof(*policy)); 22843a3e9e06SViresh Kumar new_policy.min = policy->user_policy.min; 22853a3e9e06SViresh Kumar new_policy.max = policy->user_policy.max; 22863a3e9e06SViresh Kumar new_policy.policy = policy->user_policy.policy; 22873a3e9e06SViresh Kumar new_policy.governor = policy->user_policy.governor; 22881da177e4SLinus Torvalds 2289bb176f7dSViresh Kumar /* 2290bb176f7dSViresh Kumar * BIOS might change freq behind our back 2291bb176f7dSViresh Kumar * -> ask driver for current freq and notify governors about a change 2292bb176f7dSViresh Kumar */ 22932ed99e39SRafael J. 

static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
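
/*
 * Illustrative sketch: cpufreq_boost_trigger_state() is typically driven
 * from the global "boost" sysfs attribute handled elsewhere in this file.
 * The store handler below is a simplified, hypothetical version of that
 * caller (the real one also checks cpufreq_boost_supported() first):
 *
 *	static ssize_t example_store_boost(struct kobject *kobj,
 *					   struct attribute *attr,
 *					   const char *buf, size_t count)
 *	{
 *		int enable;
 *
 *		if (sscanf(buf, "%d", &enable) != 1 ||
 *		    enable < 0 || enable > 1)
 *			return -EINVAL;
 *
 *		if (cpufreq_boost_trigger_state(enable))
 *			return -EINVAL;
 *
 *		return count;
 *	}
 */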
"enable" : "disable"); 23996f19efc0SLukasz Majewski } 24006f19efc0SLukasz Majewski 24016f19efc0SLukasz Majewski return ret; 24026f19efc0SLukasz Majewski } 24036f19efc0SLukasz Majewski 24046f19efc0SLukasz Majewski int cpufreq_boost_supported(void) 24056f19efc0SLukasz Majewski { 24066f19efc0SLukasz Majewski if (likely(cpufreq_driver)) 24076f19efc0SLukasz Majewski return cpufreq_driver->boost_supported; 24086f19efc0SLukasz Majewski 24096f19efc0SLukasz Majewski return 0; 24106f19efc0SLukasz Majewski } 24116f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 24126f19efc0SLukasz Majewski 24136f19efc0SLukasz Majewski int cpufreq_boost_enabled(void) 24146f19efc0SLukasz Majewski { 24156f19efc0SLukasz Majewski return cpufreq_driver->boost_enabled; 24166f19efc0SLukasz Majewski } 24176f19efc0SLukasz Majewski EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 24186f19efc0SLukasz Majewski 24196f19efc0SLukasz Majewski /********************************************************************* 24201da177e4SLinus Torvalds * REGISTER / UNREGISTER CPUFREQ DRIVER * 24211da177e4SLinus Torvalds *********************************************************************/ 24221da177e4SLinus Torvalds 24231da177e4SLinus Torvalds /** 24241da177e4SLinus Torvalds * cpufreq_register_driver - register a CPU Frequency driver 24251da177e4SLinus Torvalds * @driver_data: A struct cpufreq_driver containing the values# 24261da177e4SLinus Torvalds * submitted by the CPU Frequency driver. 24271da177e4SLinus Torvalds * 24281da177e4SLinus Torvalds * Registers a CPU Frequency driver to this core code. This code 24291da177e4SLinus Torvalds * returns zero on success, -EBUSY when another driver got here first 24301da177e4SLinus Torvalds * (and isn't unregistered in the meantime). 24311da177e4SLinus Torvalds * 24321da177e4SLinus Torvalds */ 2433221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data) 24341da177e4SLinus Torvalds { 24351da177e4SLinus Torvalds unsigned long flags; 24361da177e4SLinus Torvalds int ret; 24371da177e4SLinus Torvalds 2438a7b422cdSKonrad Rzeszutek Wilk if (cpufreq_disabled()) 2439a7b422cdSKonrad Rzeszutek Wilk return -ENODEV; 2440a7b422cdSKonrad Rzeszutek Wilk 24411da177e4SLinus Torvalds if (!driver_data || !driver_data->verify || !driver_data->init || 24429c0ebcf7SViresh Kumar !(driver_data->setpolicy || driver_data->target_index || 24439832235fSRafael J. Wysocki driver_data->target) || 24449832235fSRafael J. Wysocki (driver_data->setpolicy && (driver_data->target_index || 24451c03a2d0SViresh Kumar driver_data->target)) || 24461c03a2d0SViresh Kumar (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 24471da177e4SLinus Torvalds return -EINVAL; 24481da177e4SLinus Torvalds 24492d06d8c4SDominik Brodowski pr_debug("trying to register driver %s\n", driver_data->name); 24501da177e4SLinus Torvalds 24510d1857a1SNathan Zimmer write_lock_irqsave(&cpufreq_driver_lock, flags); 24521c3d85ddSRafael J. Wysocki if (cpufreq_driver) { 24530d1857a1SNathan Zimmer write_unlock_irqrestore(&cpufreq_driver_lock, flags); 24544dea5806SYinghai Lu return -EEXIST; 24551da177e4SLinus Torvalds } 24561c3d85ddSRafael J. 

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
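
/*
 * Illustrative sketch: a minimal frequency-table based driver registering
 * itself with the core.  All "example_*" symbols are hypothetical;
 * cpufreq_generic_frequency_table_verify() and cpufreq_generic_init() are
 * real helpers a driver may use.
 *
 *	static int example_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		// example_freq_table: driver-specific frequency table,
 *		// 100000 ns: assumed transition latency
 *		return cpufreq_generic_init(policy, example_freq_table,
 *					    100000);
 *	}
 *
 *	static int example_target_index(struct cpufreq_policy *policy,
 *					unsigned int index)
 *	{
 *		// program the hardware for example_freq_table[index]
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.init		= example_cpu_init,
 *	};
 *
 *	static int __init example_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&example_cpufreq_driver);
 *	}
 *	module_init(example_cpufreq_init);
 */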

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
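
/*
 * Illustrative sketch, continuing the hypothetical "example" driver from
 * the cpufreq_register_driver() comment above: the matching module exit
 * path hands the same cpufreq_driver back to cpufreq_unregister_driver().
 *
 *	static void __exit example_cpufreq_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_cpufreq_driver);
 *	}
 *	module_exit(example_cpufreq_exit);
 */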