/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and the rwlock protecting it.
 * This lock also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (e.g. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in the cpufreq hotplug path should
 *   not take this semaphore, as the top-level hotplug notifier handler
 *   already holds it.
 * - The lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
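
/*
 * Illustrative sketch (not part of the original file): readers of a policy
 * take the per-policy semaphore through the generated helpers above, e.g.:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read fields of per_cpu(cpufreq_cpu_data, cpu) ...
 *	unlock_policy_rwsem_read(cpu);
 *
 * which is the pattern used by the sysfs show() callback further below.
 */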

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each list is serialized by its own notifier head: blocking
 * for the policy list, SRCU for the transition list.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE  && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
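
/*
 * Worked example (illustrative, with made-up numbers): if a reference
 * loops_per_jiffy of 4997120 was saved at 1000000 kHz, a transition to
 * 800000 kHz rescales it to 4997120 * 800000 / 1000000 = 3997696,
 * which is what cpufreq_scale() computes (with overflow protection).
 */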


void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
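
/*
 * Sketch of the expected driver-side calling convention (hypothetical
 * driver code, shown only for illustration): a ->target() implementation
 * brackets the actual hardware change with the two notifications:
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.old = policy->cur;
 *	freqs.new = <frequency in kHz that will be set>;
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	<program the hardware>
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */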


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
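
/*
 * For illustration, show_one(scaling_min_freq, min) above expands to:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */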

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq core must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should at least include policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * The per-policy rwsem is taken and released internally as needed;
 * the caller must not hold it.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	if ((cpus == 1) && (cpufreq_driver->target))
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}


static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	retval = __cpufreq_remove_dev(dev, sif);
	return retval;
}


static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 *	cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 *	@cpu: cpu number
 *	@old_freq: CPU frequency the kernel thinks the CPU runs at
 *	@new_freq: CPU frequency the CPU actually runs at
 *
 *	We adjust to the current frequency first, and need to clean up later.
 *	So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
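
/*
 * Usage sketch (illustrative): callers that only need the last known
 * frequency, e.g. where querying the hardware would be too expensive:
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);
 *
 *	if (khz)
 *		pr_info("cpu%u last known freq: %u kHz\n", cpu, khz);
 */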

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);


static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify that no discrepancy between actual and
		   saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};


/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule a call to cpufreq_update_policy() ASAP once interrupts
 *	    are restored. It will verify that the current freq is in sync
 *	    with what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};

/**
 *	cpufreq_get_current_driver - return current driver's name
 *
 *	Return the name string of the currently loaded cpufreq driver
 *	or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *	cpufreq_register_notifier - register a driver with cpufreq
 *	@nb: notifier function to register
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Add a driver to one of two lists: either a list of drivers that
 *	are notified about clock rate changes (once before and once after
 *	the transition), or a list of drivers that are notified about
 *	changes in cpufreq policy.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
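
/*
 * Registration sketch (hypothetical module code, for illustration only;
 * the names "my_transition_cb" and "my_nb" are made up):
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n",
 *				freqs->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */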


/**
 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
 *	@nb: notifier block to be unregistered
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Remove a driver from the CPU frequency notifier list.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
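
/*
 * Policy-notifier sketch (hypothetical, for illustration): a subsystem
 * such as thermal management can cap a policy from a
 * CPUFREQ_POLICY_NOTIFIER callback during the CPUFREQ_ADJUST phase;
 * "my_max_khz" below is a made-up limit:
 *
 *	static int my_policy_cb(struct notifier_block *nb,
 *				unsigned long val, void *data)
 *	{
 *		struct cpufreq_policy *policy = data;
 *
 *		if (val == CPUFREQ_ADJUST)
 *			cpufreq_verify_within_limits(policy, policy->min,
 *						     my_max_khz);
 *		return NOTIFY_OK;
 *	}
 */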


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
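
/*
 * Usage sketch (illustrative): a governor typically requests a new
 * frequency with a relation hint, e.g. the lowest supported frequency
 * at or above the computed target:
 *
 *	__cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
 *
 * (CPUFREQ_RELATION_H would pick the highest frequency at or below it.)
 */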

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * __cpufreq_governor - forward a governor event (e.g. CPUFREQ_GOV_LIMITS)
 * to the policy's governor
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Must only be defined when the default governor is known to have
	   latency restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
			each CPU governed by this governor */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
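
/*
 * Registration sketch (hypothetical governor, for illustration only; the
 * names "my_gov" and "my_gov_governor" are made up). A trivial governor
 * pinning the policy to its maximum could look like:
 *
 *	static int my_gov_governor(struct cpufreq_policy *policy,
 *				   unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor my_gov = {
 *		.name		= "my_gov",
 *		.governor	= my_gov_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_gov);
 */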


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU whose policy is requested
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}

/**
 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *	@cpu: CPU which shall be re-evaluated
 *
 *	Useful for policy notifiers which have different necessities
 *	at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
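
/*
 * Usage sketch (illustrative): platform code that learns out-of-band
 * (e.g. from a BIOS/ACPI event) that the allowed frequency range has
 * changed simply re-evaluates the policy:
 *
 *	cpufreq_update_policy(cpu);
 *
 * which re-runs the CPUFREQ_ADJUST/CPUFREQ_NOTIFY chain via
 * __cpufreq_set_policy() above.
 */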

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_UP_CANCELED_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call	= cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 *   Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
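
/*
 * Registration sketch (hypothetical driver, for illustration only; the
 * callbacks named here are assumed to exist in such a driver and are not
 * part of this file):
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name	= "my_cpufreq",
 *		.owner	= THIS_MODULE,
 *		.init	= my_cpu_init,		// fill policy->cpuinfo & limits
 *		.verify	= my_verify_policy,	// clamp policy to a valid range
 *		.target	= my_set_target,	// perform the actual switch
 *		.get	= my_get_speed,		// current frequency in kHz
 *	};
 *
 *	ret = cpufreq_register_driver(&my_cpufreq_driver);
 */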


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 *    Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);