1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *
7  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8  *	Added handling for CPU hotplug
9  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10  *	Fix handling for CPU hotplug -- affected CPUs
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  *
16  */
17 
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/notifier.h>
22 #include <linux/cpufreq.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/spinlock.h>
26 #include <linux/device.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/completion.h>
30 #include <linux/mutex.h>
31 
32 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
33 						"cpufreq-core", msg)
34 
35 /**
36  * The "cpufreq driver" - the arch- or hardware-dependent low
37  * level driver of CPUFreq support, and its spinlock. This lock
38  * also protects the cpufreq_cpu_data array.
39  */
40 static struct cpufreq_driver *cpufreq_driver;
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
42 #ifdef CONFIG_HOTPLUG_CPU
43 /* This one keeps track of the previously set governor of a removed CPU */
44 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
45 #endif
46 static DEFINE_SPINLOCK(cpufreq_driver_lock);
47 
48 /*
49  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
50  * all cpufreq/hotplug/workqueue/etc related lock issues.
51  *
52  * The rules for this semaphore:
53  * - Any routine that wants to read from the policy structure will
54  *   do a down_read on this semaphore.
55  * - Any routine that will write to the policy structure and/or may take away
56  *   the policy altogether (eg. CPU hotplug), will hold this lock in write
57  *   mode before doing so.
58  *
59  * Additional rules:
60  * - All holders of the lock should check to make sure that the CPU they
61  *   are concerned with is online after they get the lock.
62  * - Governor routines that can be called in the cpufreq hotplug path should
63  *   not take this sem, as the top-level hotplug notifier handler takes it.
64  * - Lock should not be held across
65  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
66  */
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69 
70 #define lock_policy_rwsem(mode, cpu)					\
71 int lock_policy_rwsem_##mode						\
72 (int cpu)								\
73 {									\
74 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
75 	BUG_ON(policy_cpu == -1);					\
76 	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
77 	if (unlikely(!cpu_online(cpu))) {				\
78 		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
79 		return -1;						\
80 	}								\
81 									\
82 	return 0;							\
83 }
84 
85 lock_policy_rwsem(read, cpu);
86 EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
87 
88 lock_policy_rwsem(write, cpu);
89 EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
90 
91 void unlock_policy_rwsem_read(int cpu)
92 {
93 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
94 	BUG_ON(policy_cpu == -1);
95 	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
96 }
97 EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
98 
99 void unlock_policy_rwsem_write(int cpu)
100 {
101 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
102 	BUG_ON(policy_cpu == -1);
103 	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
104 }
105 EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
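
/*
 * Illustrative usage sketch for the per-CPU policy rwsem helpers above
 * (not part of the original file); it mirrors what the sysfs show() and
 * store() handlers further down do:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;		the CPU went offline under us
 *	... read fields of the policy governing "cpu" ...
 *	unlock_policy_rwsem_read(cpu);
 */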
106 
107 
108 /* internal prototypes */
109 static int __cpufreq_governor(struct cpufreq_policy *policy,
110 		unsigned int event);
111 static unsigned int __cpufreq_get(unsigned int cpu);
112 static void handle_update(struct work_struct *work);
113 
114 /**
115  * Two notifier lists: the "policy" list is involved in the
116  * validation process for a new CPU frequency policy; the
117  * "transition" list for kernel code that needs to handle
118  * changes to devices when the CPU clock speed changes.
119  * The mutex locks both lists.
120  */
121 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
122 static struct srcu_notifier_head cpufreq_transition_notifier_list;
123 
124 static bool init_cpufreq_transition_notifier_list_called;
125 static int __init init_cpufreq_transition_notifier_list(void)
126 {
127 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
128 	init_cpufreq_transition_notifier_list_called = true;
129 	return 0;
130 }
131 pure_initcall(init_cpufreq_transition_notifier_list);
132 
133 static LIST_HEAD(cpufreq_governor_list);
134 static DEFINE_MUTEX(cpufreq_governor_mutex);
135 
136 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
137 {
138 	struct cpufreq_policy *data;
139 	unsigned long flags;
140 
141 	if (cpu >= nr_cpu_ids)
142 		goto err_out;
143 
144 	/* get the cpufreq driver */
145 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
146 
147 	if (!cpufreq_driver)
148 		goto err_out_unlock;
149 
150 	if (!try_module_get(cpufreq_driver->owner))
151 		goto err_out_unlock;
152 
153 
154 	/* get the CPU */
155 	data = per_cpu(cpufreq_cpu_data, cpu);
156 
157 	if (!data)
158 		goto err_out_put_module;
159 
160 	if (!kobject_get(&data->kobj))
161 		goto err_out_put_module;
162 
163 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
164 	return data;
165 
166 err_out_put_module:
167 	module_put(cpufreq_driver->owner);
168 err_out_unlock:
169 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
170 err_out:
171 	return NULL;
172 }
173 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
174 
175 
176 void cpufreq_cpu_put(struct cpufreq_policy *data)
177 {
178 	kobject_put(&data->kobj);
179 	module_put(cpufreq_driver->owner);
180 }
181 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
182 
183 
184 /*********************************************************************
185  *                     UNIFIED DEBUG HELPERS                         *
186  *********************************************************************/
187 #ifdef CONFIG_CPU_FREQ_DEBUG
188 
189 /* what part(s) of the CPUfreq subsystem are debugged? */
190 static unsigned int debug;
191 
192 /* is the debug output rate-limited using printk_ratelimit? Users can
193  * set or modify this value.
194  */
195 static unsigned int debug_ratelimit = 1;
196 
197 /* is the printk_ratelimit'ing enabled? It's enabled after a successful
198  * loading of a cpufreq driver, temporarily disabled when a new policy
199  * is set, and disabled upon cpufreq driver removal
200  */
201 static unsigned int disable_ratelimit = 1;
202 static DEFINE_SPINLOCK(disable_ratelimit_lock);
203 
204 static void cpufreq_debug_enable_ratelimit(void)
205 {
206 	unsigned long flags;
207 
208 	spin_lock_irqsave(&disable_ratelimit_lock, flags);
209 	if (disable_ratelimit)
210 		disable_ratelimit--;
211 	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
212 }
213 
214 static void cpufreq_debug_disable_ratelimit(void)
215 {
216 	unsigned long flags;
217 
218 	spin_lock_irqsave(&disable_ratelimit_lock, flags);
219 	disable_ratelimit++;
220 	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
221 }
222 
223 void cpufreq_debug_printk(unsigned int type, const char *prefix,
224 			const char *fmt, ...)
225 {
226 	char s[256];
227 	va_list args;
228 	unsigned int len;
229 	unsigned long flags;
230 
231 	WARN_ON(!prefix);
232 	if (type & debug) {
233 		spin_lock_irqsave(&disable_ratelimit_lock, flags);
234 		if (!disable_ratelimit && debug_ratelimit
235 					&& !printk_ratelimit()) {
236 			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
237 			return;
238 		}
239 		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
240 
241 		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
242 
243 		va_start(args, fmt);
244 		len += vsnprintf(&s[len], (256 - len), fmt, args);
245 		va_end(args);
246 
247 		printk(s);
248 
249 		WARN_ON(len < 5);
250 	}
251 }
252 EXPORT_SYMBOL(cpufreq_debug_printk);
253 
254 
255 module_param(debug, uint, 0644);
256 MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
257 			" 2 to debug drivers, and 4 to debug governors.");
258 
259 module_param(debug_ratelimit, uint, 0644);
260 MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
261 					" set to 0 to disable ratelimiting.");
262 
263 #else /* !CONFIG_CPU_FREQ_DEBUG */
264 
265 static inline void cpufreq_debug_enable_ratelimit(void) { return; }
266 static inline void cpufreq_debug_disable_ratelimit(void) { return; }
267 
268 #endif /* CONFIG_CPU_FREQ_DEBUG */
269 
270 
271 /*********************************************************************
272  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
273  *********************************************************************/
274 
275 /**
276  * adjust_jiffies - adjust the system "loops_per_jiffy"
277  *
278  * This function alters the system "loops_per_jiffy" for the clock
279  * speed change. Note that loops_per_jiffy cannot be updated on SMP
280  * systems as each CPU might be scaled differently. So, use the arch
281  * per-CPU loops_per_jiffy value wherever possible.
282  */
283 #ifndef CONFIG_SMP
284 static unsigned long l_p_j_ref;
285 static unsigned int  l_p_j_ref_freq;
286 
287 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
288 {
289 	if (ci->flags & CPUFREQ_CONST_LOOPS)
290 		return;
291 
292 	if (!l_p_j_ref_freq) {
293 		l_p_j_ref = loops_per_jiffy;
294 		l_p_j_ref_freq = ci->old;
295 		dprintk("saving %lu as reference value for loops_per_jiffy; "
296 			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
297 	}
298 	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
299 	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
300 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
301 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
302 								ci->new);
303 		dprintk("scaling loops_per_jiffy to %lu "
304 			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
305 	}
306 }
307 #else
308 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
309 {
310 	return;
311 }
312 #endif
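
/*
 * Worked example (illustrative, not from the original file): if the
 * reference value l_p_j_ref = 2000000 was recorded at l_p_j_ref_freq =
 * 1000000 kHz and the CPU now transitions to ci->new = 500000 kHz, the
 * cpufreq_scale() call above rescales loops_per_jiffy to approximately
 * l_p_j_ref * ci->new / l_p_j_ref_freq = 1000000 loops.
 */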
313 
314 
315 /**
316  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
317  * on frequency transition.
318  *
319  * This function calls the transition notifiers and the "adjust_jiffies"
320  * function. It is called twice on all CPU frequency changes that have
321  * external effects.
322  */
323 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
324 {
325 	struct cpufreq_policy *policy;
326 
327 	BUG_ON(irqs_disabled());
328 
329 	freqs->flags = cpufreq_driver->flags;
330 	dprintk("notification %u of frequency transition to %u kHz\n",
331 		state, freqs->new);
332 
333 	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
334 	switch (state) {
335 
336 	case CPUFREQ_PRECHANGE:
337 		/* detect if the driver reported a value as "old frequency"
338 		 * which is not equal to what the cpufreq core thinks is
339 		 * "old frequency".
340 		 */
341 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
342 			if ((policy) && (policy->cpu == freqs->cpu) &&
343 			    (policy->cur) && (policy->cur != freqs->old)) {
344 				dprintk("Warning: CPU frequency is"
345 					" %u kHz, cpufreq assumed %u kHz.\n",
346 					freqs->old, policy->cur);
347 				freqs->old = policy->cur;
348 			}
349 		}
350 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
351 				CPUFREQ_PRECHANGE, freqs);
352 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
353 		break;
354 
355 	case CPUFREQ_POSTCHANGE:
356 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
357 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
358 				CPUFREQ_POSTCHANGE, freqs);
359 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
360 			policy->cur = freqs->new;
361 		break;
362 	}
363 }
364 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
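
/*
 * Illustrative sketch of the expected caller-side pattern (hypothetical
 * driver code, not part of this file): a scaling driver brackets the
 * actual hardware reprogramming with the two notifications.
 *
 *	struct cpufreq_freqs freqs = {
 *		.cpu = policy->cpu,
 *		.old = policy->cur,
 *		.new = target_freq,
 *	};
 *
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	... program the new frequency into the hardware ...
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */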
365 
366 
367 
368 /*********************************************************************
369  *                          SYSFS INTERFACE                          *
370  *********************************************************************/
371 
372 static struct cpufreq_governor *__find_governor(const char *str_governor)
373 {
374 	struct cpufreq_governor *t;
375 
376 	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
377 		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
378 			return t;
379 
380 	return NULL;
381 }
382 
383 /**
384  * cpufreq_parse_governor - parse a governor string
385  */
386 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
387 				struct cpufreq_governor **governor)
388 {
389 	int err = -EINVAL;
390 
391 	if (!cpufreq_driver)
392 		goto out;
393 
394 	if (cpufreq_driver->setpolicy) {
395 		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
396 			*policy = CPUFREQ_POLICY_PERFORMANCE;
397 			err = 0;
398 		} else if (!strnicmp(str_governor, "powersave",
399 						CPUFREQ_NAME_LEN)) {
400 			*policy = CPUFREQ_POLICY_POWERSAVE;
401 			err = 0;
402 		}
403 	} else if (cpufreq_driver->target) {
404 		struct cpufreq_governor *t;
405 
406 		mutex_lock(&cpufreq_governor_mutex);
407 
408 		t = __find_governor(str_governor);
409 
410 		if (t == NULL) {
411 			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
412 								str_governor);
413 
414 			if (name) {
415 				int ret;
416 
417 				mutex_unlock(&cpufreq_governor_mutex);
418 				ret = request_module("%s", name);
419 				mutex_lock(&cpufreq_governor_mutex);
420 
421 				if (ret == 0)
422 					t = __find_governor(str_governor);
423 			}
424 
425 			kfree(name);
426 		}
427 
428 		if (t != NULL) {
429 			*governor = t;
430 			err = 0;
431 		}
432 
433 		mutex_unlock(&cpufreq_governor_mutex);
434 	}
435 out:
436 	return err;
437 }
438 
439 
440 /**
441  * cpufreq_per_cpu_attr_read() / show_##file_name() -
442  * print out cpufreq information
443  *
444  * Write out information from cpufreq_driver->policy[cpu]; object must be
445  * "unsigned int".
446  */
447 
448 #define show_one(file_name, object)			\
449 static ssize_t show_##file_name				\
450 (struct cpufreq_policy *policy, char *buf)		\
451 {							\
452 	return sprintf(buf, "%u\n", policy->object);	\
453 }
454 
455 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
456 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
457 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
458 show_one(scaling_min_freq, min);
459 show_one(scaling_max_freq, max);
460 show_one(scaling_cur_freq, cur);
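
/*
 * For reference, a sketch of what one of the show_one() invocations above
 * expands to (illustrative only):
 *
 *	static ssize_t show_scaling_cur_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */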
461 
462 static int __cpufreq_set_policy(struct cpufreq_policy *data,
463 				struct cpufreq_policy *policy);
464 
465 /**
466  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
467  */
468 #define store_one(file_name, object)			\
469 static ssize_t store_##file_name					\
470 (struct cpufreq_policy *policy, const char *buf, size_t count)		\
471 {									\
472 	unsigned int ret = -EINVAL;					\
473 	struct cpufreq_policy new_policy;				\
474 									\
475 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
476 	if (ret)							\
477 		return -EINVAL;						\
478 									\
479 	ret = sscanf(buf, "%u", &new_policy.object);			\
480 	if (ret != 1)							\
481 		return -EINVAL;						\
482 									\
483 	ret = __cpufreq_set_policy(policy, &new_policy);		\
484 	policy->user_policy.object = policy->object;			\
485 									\
486 	return ret ? ret : count;					\
487 }
488 
489 store_one(scaling_min_freq, min);
490 store_one(scaling_max_freq, max);
491 
492 /**
493  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
494  */
495 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
496 					char *buf)
497 {
498 	unsigned int cur_freq = __cpufreq_get(policy->cpu);
499 	if (!cur_freq)
500 		return sprintf(buf, "<unknown>\n");
501 	return sprintf(buf, "%u\n", cur_freq);
502 }
503 
504 
505 /**
506  * show_scaling_governor - show the current policy for the specified CPU
507  */
508 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
509 {
510 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
511 		return sprintf(buf, "powersave\n");
512 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
513 		return sprintf(buf, "performance\n");
514 	else if (policy->governor)
515 		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
516 				policy->governor->name);
517 	return -EINVAL;
518 }
519 
520 
521 /**
522  * store_scaling_governor - store policy for the specified CPU
523  */
524 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
525 					const char *buf, size_t count)
526 {
527 	unsigned int ret = -EINVAL;
528 	char	str_governor[16];
529 	struct cpufreq_policy new_policy;
530 
531 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
532 	if (ret)
533 		return ret;
534 
535 	ret = sscanf(buf, "%15s", str_governor);
536 	if (ret != 1)
537 		return -EINVAL;
538 
539 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
540 						&new_policy.governor))
541 		return -EINVAL;
542 
543 	/* Do not use cpufreq_set_policy here or the user_policy.max
544 	   will be wrongly overridden */
545 	ret = __cpufreq_set_policy(policy, &new_policy);
546 
547 	policy->user_policy.policy = policy->policy;
548 	policy->user_policy.governor = policy->governor;
549 
550 	if (ret)
551 		return ret;
552 	else
553 		return count;
554 }
555 
556 /**
557  * show_scaling_driver - show the cpufreq driver currently loaded
558  */
559 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
560 {
561 	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
562 }
563 
564 /**
565  * show_scaling_available_governors - show the available CPUfreq governors
566  */
567 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
568 						char *buf)
569 {
570 	ssize_t i = 0;
571 	struct cpufreq_governor *t;
572 
573 	if (!cpufreq_driver->target) {
574 		i += sprintf(buf, "performance powersave");
575 		goto out;
576 	}
577 
578 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
579 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
580 		    - (CPUFREQ_NAME_LEN + 2)))
581 			goto out;
582 		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
583 	}
584 out:
585 	i += sprintf(&buf[i], "\n");
586 	return i;
587 }
588 
589 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
590 {
591 	ssize_t i = 0;
592 	unsigned int cpu;
593 
594 	for_each_cpu(cpu, mask) {
595 		if (i)
596 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
597 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
598 		if (i >= (PAGE_SIZE - 5))
599 			break;
600 	}
601 	i += sprintf(&buf[i], "\n");
602 	return i;
603 }
604 
605 /**
606  * show_related_cpus - show the CPUs affected by each transition even if
607  * hw coordination is in use
608  */
609 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
610 {
611 	if (cpumask_empty(policy->related_cpus))
612 		return show_cpus(policy->cpus, buf);
613 	return show_cpus(policy->related_cpus, buf);
614 }
615 
616 /**
617  * show_affected_cpus - show the CPUs affected by each transition
618  */
619 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
620 {
621 	return show_cpus(policy->cpus, buf);
622 }
623 
624 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
625 					const char *buf, size_t count)
626 {
627 	unsigned int freq = 0;
628 	unsigned int ret;
629 
630 	if (!policy->governor || !policy->governor->store_setspeed)
631 		return -EINVAL;
632 
633 	ret = sscanf(buf, "%u", &freq);
634 	if (ret != 1)
635 		return -EINVAL;
636 
637 	policy->governor->store_setspeed(policy, freq);
638 
639 	return count;
640 }
641 
642 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
643 {
644 	if (!policy->governor || !policy->governor->show_setspeed)
645 		return sprintf(buf, "<unsupported>\n");
646 
647 	return policy->governor->show_setspeed(policy, buf);
648 }
649 
650 /**
651  * show_bios_limit - show the current cpufreq HW/BIOS limitation
652  */
653 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
654 {
655 	unsigned int limit;
656 	int ret;
657 	if (cpufreq_driver->bios_limit) {
658 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
659 		if (!ret)
660 			return sprintf(buf, "%u\n", limit);
661 	}
662 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
663 }
664 
665 #define define_one_ro(_name) \
666 static struct freq_attr _name = \
667 __ATTR(_name, 0444, show_##_name, NULL)
668 
669 #define define_one_ro0400(_name) \
670 static struct freq_attr _name = \
671 __ATTR(_name, 0400, show_##_name, NULL)
672 
673 #define define_one_rw(_name) \
674 static struct freq_attr _name = \
675 __ATTR(_name, 0644, show_##_name, store_##_name)
676 
677 define_one_ro0400(cpuinfo_cur_freq);
678 define_one_ro(cpuinfo_min_freq);
679 define_one_ro(cpuinfo_max_freq);
680 define_one_ro(cpuinfo_transition_latency);
681 define_one_ro(scaling_available_governors);
682 define_one_ro(scaling_driver);
683 define_one_ro(scaling_cur_freq);
684 define_one_ro(bios_limit);
685 define_one_ro(related_cpus);
686 define_one_ro(affected_cpus);
687 define_one_rw(scaling_min_freq);
688 define_one_rw(scaling_max_freq);
689 define_one_rw(scaling_governor);
690 define_one_rw(scaling_setspeed);
691 
692 static struct attribute *default_attrs[] = {
693 	&cpuinfo_min_freq.attr,
694 	&cpuinfo_max_freq.attr,
695 	&cpuinfo_transition_latency.attr,
696 	&scaling_min_freq.attr,
697 	&scaling_max_freq.attr,
698 	&affected_cpus.attr,
699 	&related_cpus.attr,
700 	&scaling_governor.attr,
701 	&scaling_driver.attr,
702 	&scaling_available_governors.attr,
703 	&scaling_setspeed.attr,
704 	NULL
705 };
706 
707 struct kobject *cpufreq_global_kobject;
708 EXPORT_SYMBOL(cpufreq_global_kobject);
709 
710 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
711 #define to_attr(a) container_of(a, struct freq_attr, attr)
712 
713 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
714 {
715 	struct cpufreq_policy *policy = to_policy(kobj);
716 	struct freq_attr *fattr = to_attr(attr);
717 	ssize_t ret = -EINVAL;
718 	policy = cpufreq_cpu_get(policy->cpu);
719 	if (!policy)
720 		goto no_policy;
721 
722 	if (lock_policy_rwsem_read(policy->cpu) < 0)
723 		goto fail;
724 
725 	if (fattr->show)
726 		ret = fattr->show(policy, buf);
727 	else
728 		ret = -EIO;
729 
730 	unlock_policy_rwsem_read(policy->cpu);
731 fail:
732 	cpufreq_cpu_put(policy);
733 no_policy:
734 	return ret;
735 }
736 
737 static ssize_t store(struct kobject *kobj, struct attribute *attr,
738 		     const char *buf, size_t count)
739 {
740 	struct cpufreq_policy *policy = to_policy(kobj);
741 	struct freq_attr *fattr = to_attr(attr);
742 	ssize_t ret = -EINVAL;
743 	policy = cpufreq_cpu_get(policy->cpu);
744 	if (!policy)
745 		goto no_policy;
746 
747 	if (lock_policy_rwsem_write(policy->cpu) < 0)
748 		goto fail;
749 
750 	if (fattr->store)
751 		ret = fattr->store(policy, buf, count);
752 	else
753 		ret = -EIO;
754 
755 	unlock_policy_rwsem_write(policy->cpu);
756 fail:
757 	cpufreq_cpu_put(policy);
758 no_policy:
759 	return ret;
760 }
761 
762 static void cpufreq_sysfs_release(struct kobject *kobj)
763 {
764 	struct cpufreq_policy *policy = to_policy(kobj);
765 	dprintk("last reference is dropped\n");
766 	complete(&policy->kobj_unregister);
767 }
768 
769 static struct sysfs_ops sysfs_ops = {
770 	.show	= show,
771 	.store	= store,
772 };
773 
774 static struct kobj_type ktype_cpufreq = {
775 	.sysfs_ops	= &sysfs_ops,
776 	.default_attrs	= default_attrs,
777 	.release	= cpufreq_sysfs_release,
778 };
779 
780 /*
781  * Returns:
782  *   Negative: Failure
783  *   0:        Success
784  *   Positive: When we have a managed CPU and the sysfs got symlinked
785  */
786 static int cpufreq_add_dev_policy(unsigned int cpu,
787 				  struct cpufreq_policy *policy,
788 				  struct sys_device *sys_dev)
789 {
790 	int ret = 0;
791 #ifdef CONFIG_SMP
792 	unsigned long flags;
793 	unsigned int j;
794 #ifdef CONFIG_HOTPLUG_CPU
795 	struct cpufreq_governor *gov;
796 
797 	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
798 	if (gov) {
799 		policy->governor = gov;
800 		dprintk("Restoring governor %s for cpu %d\n",
801 		       policy->governor->name, cpu);
802 	}
803 #endif
804 
805 	for_each_cpu(j, policy->cpus) {
806 		struct cpufreq_policy *managed_policy;
807 
808 		if (cpu == j)
809 			continue;
810 
811 		/* Check for existing affected CPUs.
812 		 * They may not be aware of it due to CPU Hotplug.
813 		 * cpufreq_cpu_put is called when the device is removed
814 		 * in __cpufreq_remove_dev()
815 		 */
816 		managed_policy = cpufreq_cpu_get(j);
817 		if (unlikely(managed_policy)) {
818 
819 			/* Set proper policy_cpu */
820 			unlock_policy_rwsem_write(cpu);
821 			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
822 
823 			if (lock_policy_rwsem_write(cpu) < 0) {
824 				/* Should not go through policy unlock path */
825 				if (cpufreq_driver->exit)
826 					cpufreq_driver->exit(policy);
827 				cpufreq_cpu_put(managed_policy);
828 				return -EBUSY;
829 			}
830 
831 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
832 			cpumask_copy(managed_policy->cpus, policy->cpus);
833 			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
834 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
835 
836 			dprintk("CPU already managed, adding link\n");
837 			ret = sysfs_create_link(&sys_dev->kobj,
838 						&managed_policy->kobj,
839 						"cpufreq");
840 			if (ret)
841 				cpufreq_cpu_put(managed_policy);
842 			/*
843 			 * Success. We only needed to be added to the mask.
844 			 * Call driver->exit() because only the cpu parent of
845 			 * the kobj needed to call init().
846 			 */
847 			if (cpufreq_driver->exit)
848 				cpufreq_driver->exit(policy);
849 
850 			if (!ret)
851 				return 1;
852 			else
853 				return ret;
854 		}
855 	}
856 #endif
857 	return ret;
858 }
859 
860 
861 /* symlink affected CPUs */
862 static int cpufreq_add_dev_symlink(unsigned int cpu,
863 				   struct cpufreq_policy *policy)
864 {
865 	unsigned int j;
866 	int ret = 0;
867 
868 	for_each_cpu(j, policy->cpus) {
869 		struct cpufreq_policy *managed_policy;
870 		struct sys_device *cpu_sys_dev;
871 
872 		if (j == cpu)
873 			continue;
874 		if (!cpu_online(j))
875 			continue;
876 
877 		dprintk("CPU %u already managed, adding link\n", j);
878 		managed_policy = cpufreq_cpu_get(cpu);
879 		cpu_sys_dev = get_cpu_sysdev(j);
880 		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
881 					"cpufreq");
882 		if (ret) {
883 			cpufreq_cpu_put(managed_policy);
884 			return ret;
885 		}
886 	}
887 	return ret;
888 }
889 
890 static int cpufreq_add_dev_interface(unsigned int cpu,
891 				     struct cpufreq_policy *policy,
892 				     struct sys_device *sys_dev)
893 {
894 	struct cpufreq_policy new_policy;
895 	struct freq_attr **drv_attr;
896 	unsigned long flags;
897 	int ret = 0;
898 	unsigned int j;
899 
900 	/* prepare interface data */
901 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
902 				   &sys_dev->kobj, "cpufreq");
903 	if (ret)
904 		return ret;
905 
906 	/* set up files for this cpu device */
907 	drv_attr = cpufreq_driver->attr;
908 	while ((drv_attr) && (*drv_attr)) {
909 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
910 		if (ret)
911 			goto err_out_kobj_put;
912 		drv_attr++;
913 	}
914 	if (cpufreq_driver->get) {
915 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
916 		if (ret)
917 			goto err_out_kobj_put;
918 	}
919 	if (cpufreq_driver->target) {
920 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
921 		if (ret)
922 			goto err_out_kobj_put;
923 	}
924 	if (cpufreq_driver->bios_limit) {
925 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
926 		if (ret)
927 			goto err_out_kobj_put;
928 	}
929 
930 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
931 	for_each_cpu(j, policy->cpus) {
932 		if (!cpu_online(j))
933 			continue;
934 		per_cpu(cpufreq_cpu_data, j) = policy;
935 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
936 	}
937 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
938 
939 	ret = cpufreq_add_dev_symlink(cpu, policy);
940 	if (ret)
941 		goto err_out_kobj_put;
942 
943 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
944 	/* assure that the starting sequence is run in __cpufreq_set_policy */
945 	policy->governor = NULL;
946 
947 	/* set default policy */
948 	ret = __cpufreq_set_policy(policy, &new_policy);
949 	policy->user_policy.policy = policy->policy;
950 	policy->user_policy.governor = policy->governor;
951 
952 	if (ret) {
953 		dprintk("setting policy failed\n");
954 		if (cpufreq_driver->exit)
955 			cpufreq_driver->exit(policy);
956 	}
957 	return ret;
958 
959 err_out_kobj_put:
960 	kobject_put(&policy->kobj);
961 	wait_for_completion(&policy->kobj_unregister);
962 	return ret;
963 }
964 
965 
966 /**
967  * cpufreq_add_dev - add a CPU device
968  *
969  * Adds the cpufreq interface for a CPU device.
970  *
971  * The Oracle says: try running cpufreq registration/unregistration concurrently
972  * with cpu hotplugging and all hell will break loose. Tried to clean this
973  * mess up, but more thorough testing is needed. - Mathieu
974  */
975 static int cpufreq_add_dev(struct sys_device *sys_dev)
976 {
977 	unsigned int cpu = sys_dev->id;
978 	int ret = 0, found = 0;
979 	struct cpufreq_policy *policy;
980 	unsigned long flags;
981 	unsigned int j;
982 #ifdef CONFIG_HOTPLUG_CPU
983 	int sibling;
984 #endif
985 
986 	if (cpu_is_offline(cpu))
987 		return 0;
988 
989 	cpufreq_debug_disable_ratelimit();
990 	dprintk("adding CPU %u\n", cpu);
991 
992 #ifdef CONFIG_SMP
993 	/* check whether a different CPU already registered this
994 	 * CPU because it is in the same boat. */
995 	policy = cpufreq_cpu_get(cpu);
996 	if (unlikely(policy)) {
997 		cpufreq_cpu_put(policy);
998 		cpufreq_debug_enable_ratelimit();
999 		return 0;
1000 	}
1001 #endif
1002 
1003 	if (!try_module_get(cpufreq_driver->owner)) {
1004 		ret = -EINVAL;
1005 		goto module_out;
1006 	}
1007 
1008 	ret = -ENOMEM;
1009 	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
1010 	if (!policy)
1011 		goto nomem_out;
1012 
1013 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1014 		goto err_free_policy;
1015 
1016 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1017 		goto err_free_cpumask;
1018 
1019 	policy->cpu = cpu;
1020 	cpumask_copy(policy->cpus, cpumask_of(cpu));
1021 
1022 	/* Initially set CPU itself as the policy_cpu */
1023 	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1024 	ret = (lock_policy_rwsem_write(cpu) < 0);
1025 	WARN_ON(ret);
1026 
1027 	init_completion(&policy->kobj_unregister);
1028 	INIT_WORK(&policy->update, handle_update);
1029 
1030 	/* Set governor before ->init, so that driver could check it */
1031 #ifdef CONFIG_HOTPLUG_CPU
1032 	for_each_online_cpu(sibling) {
1033 		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1034 		if (cp && cp->governor &&
1035 		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
1036 			policy->governor = cp->governor;
1037 			found = 1;
1038 			break;
1039 		}
1040 	}
1041 #endif
1042 	if (!found)
1043 		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1044 	/* call driver. From then on the cpufreq driver must be able
1045 	 * to accept all calls to ->verify and ->setpolicy for this CPU
1046 	 */
1047 	ret = cpufreq_driver->init(policy);
1048 	if (ret) {
1049 		dprintk("initialization failed\n");
1050 		goto err_unlock_policy;
1051 	}
1052 	policy->user_policy.min = policy->min;
1053 	policy->user_policy.max = policy->max;
1054 
1055 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1056 				     CPUFREQ_START, policy);
1057 
1058 	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
1059 	if (ret) {
1060 		if (ret > 0)
1061 			/* This is a managed cpu, symlink created,
1062 			   exit with 0 */
1063 			ret = 0;
1064 		goto err_unlock_policy;
1065 	}
1066 
1067 	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
1068 	if (ret)
1069 		goto err_out_unregister;
1070 
1071 	unlock_policy_rwsem_write(cpu);
1072 
1073 	kobject_uevent(&policy->kobj, KOBJ_ADD);
1074 	module_put(cpufreq_driver->owner);
1075 	dprintk("initialization complete\n");
1076 	cpufreq_debug_enable_ratelimit();
1077 
1078 	return 0;
1079 
1080 
1081 err_out_unregister:
1082 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1083 	for_each_cpu(j, policy->cpus)
1084 		per_cpu(cpufreq_cpu_data, j) = NULL;
1085 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1086 
1087 	kobject_put(&policy->kobj);
1088 	wait_for_completion(&policy->kobj_unregister);
1089 
1090 err_unlock_policy:
1091 	unlock_policy_rwsem_write(cpu);
1092 err_free_cpumask:
1093 	free_cpumask_var(policy->cpus);
1094 err_free_policy:
1095 	kfree(policy);
1096 nomem_out:
1097 	module_put(cpufreq_driver->owner);
1098 module_out:
1099 	cpufreq_debug_enable_ratelimit();
1100 	return ret;
1101 }
1102 
1103 
1104 /**
1105  * __cpufreq_remove_dev - remove a CPU device
1106  *
1107  * Removes the cpufreq interface for a CPU device.
1108  * Caller should already have policy_rwsem in write mode for this CPU.
1109  * This routine frees the rwsem before returning.
1110  */
1111 static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1112 {
1113 	unsigned int cpu = sys_dev->id;
1114 	unsigned long flags;
1115 	struct cpufreq_policy *data;
1116 #ifdef CONFIG_SMP
1117 	struct sys_device *cpu_sys_dev;
1118 	unsigned int j;
1119 #endif
1120 
1121 	cpufreq_debug_disable_ratelimit();
1122 	dprintk("unregistering CPU %u\n", cpu);
1123 
1124 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1125 	data = per_cpu(cpufreq_cpu_data, cpu);
1126 
1127 	if (!data) {
1128 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1129 		cpufreq_debug_enable_ratelimit();
1130 		unlock_policy_rwsem_write(cpu);
1131 		return -EINVAL;
1132 	}
1133 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1134 
1135 
1136 #ifdef CONFIG_SMP
1137 	/* if this isn't the CPU which is the parent of the kobj, we
1138 	 * only need to unlink, put and exit
1139 	 */
1140 	if (unlikely(cpu != data->cpu)) {
1141 		dprintk("removing link\n");
1142 		cpumask_clear_cpu(cpu, data->cpus);
1143 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1144 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
1145 		cpufreq_cpu_put(data);
1146 		cpufreq_debug_enable_ratelimit();
1147 		unlock_policy_rwsem_write(cpu);
1148 		return 0;
1149 	}
1150 #endif
1151 
1152 #ifdef CONFIG_SMP
1153 
1154 #ifdef CONFIG_HOTPLUG_CPU
1155 	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1156 			CPUFREQ_NAME_LEN);
1157 #endif
1158 
1159 	/* if we have other CPUs still registered, we need to unlink them,
1160 	 * or else wait_for_completion below will lock up. Clean the
1161 	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1162 	 * the sysfs links afterwards.
1163 	 */
1164 	if (unlikely(cpumask_weight(data->cpus) > 1)) {
1165 		for_each_cpu(j, data->cpus) {
1166 			if (j == cpu)
1167 				continue;
1168 			per_cpu(cpufreq_cpu_data, j) = NULL;
1169 		}
1170 	}
1171 
1172 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1173 
1174 	if (unlikely(cpumask_weight(data->cpus) > 1)) {
1175 		for_each_cpu(j, data->cpus) {
1176 			if (j == cpu)
1177 				continue;
1178 			dprintk("removing link for cpu %u\n", j);
1179 #ifdef CONFIG_HOTPLUG_CPU
1180 			strncpy(per_cpu(cpufreq_cpu_governor, j),
1181 				data->governor->name, CPUFREQ_NAME_LEN);
1182 #endif
1183 			cpu_sys_dev = get_cpu_sysdev(j);
1184 			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
1185 			cpufreq_cpu_put(data);
1186 		}
1187 	}
1188 #else
1189 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1190 #endif
1191 
1192 	if (cpufreq_driver->target)
1193 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
1194 
1195 	kobject_put(&data->kobj);
1196 
1197 	/* we need to make sure that the underlying kobj is actually
1198 	 * not referenced anymore by anybody before we proceed with
1199 	 * unloading.
1200 	 */
1201 	dprintk("waiting for dropping of refcount\n");
1202 	wait_for_completion(&data->kobj_unregister);
1203 	dprintk("wait complete\n");
1204 
1205 	if (cpufreq_driver->exit)
1206 		cpufreq_driver->exit(data);
1207 
1208 	unlock_policy_rwsem_write(cpu);
1209 
1210 	free_cpumask_var(data->related_cpus);
1211 	free_cpumask_var(data->cpus);
1212 	kfree(data);
1213 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1214 
1215 	cpufreq_debug_enable_ratelimit();
1216 	return 0;
1217 }
1218 
1219 
1220 static int cpufreq_remove_dev(struct sys_device *sys_dev)
1221 {
1222 	unsigned int cpu = sys_dev->id;
1223 	int retval;
1224 
1225 	if (cpu_is_offline(cpu))
1226 		return 0;
1227 
1228 	if (unlikely(lock_policy_rwsem_write(cpu)))
1229 		BUG();
1230 
1231 	retval = __cpufreq_remove_dev(sys_dev);
1232 	return retval;
1233 }
1234 
1235 
1236 static void handle_update(struct work_struct *work)
1237 {
1238 	struct cpufreq_policy *policy =
1239 		container_of(work, struct cpufreq_policy, update);
1240 	unsigned int cpu = policy->cpu;
1241 	dprintk("handle_update for cpu %u called\n", cpu);
1242 	cpufreq_update_policy(cpu);
1243 }
1244 
1245 /**
1246  *	cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
1247  *	@cpu: cpu number
1248  *	@old_freq: CPU frequency the kernel thinks the CPU runs at
1249  *	@new_freq: CPU frequency the CPU actually runs at
1250  *
1251  *	We adjust to current frequency first, and need to clean up later.
1252  *	So either call to cpufreq_update_policy() or schedule handle_update()).
1253  *	So either call cpufreq_update_policy() or schedule handle_update().
1254 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1255 				unsigned int new_freq)
1256 {
1257 	struct cpufreq_freqs freqs;
1258 
1259 	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1260 	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1261 
1262 	freqs.cpu = cpu;
1263 	freqs.old = old_freq;
1264 	freqs.new = new_freq;
1265 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1266 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1267 }
1268 
1269 
1270 /**
1271  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1272  * @cpu: CPU number
1273  *
1274  * This is the last known freq, without actually getting it from the driver.
1275  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1276  */
1277 unsigned int cpufreq_quick_get(unsigned int cpu)
1278 {
1279 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1280 	unsigned int ret_freq = 0;
1281 
1282 	if (policy) {
1283 		ret_freq = policy->cur;
1284 		cpufreq_cpu_put(policy);
1285 	}
1286 
1287 	return ret_freq;
1288 }
1289 EXPORT_SYMBOL(cpufreq_quick_get);
1290 
1291 
1292 static unsigned int __cpufreq_get(unsigned int cpu)
1293 {
1294 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1295 	unsigned int ret_freq = 0;
1296 
1297 	if (!cpufreq_driver->get)
1298 		return ret_freq;
1299 
1300 	ret_freq = cpufreq_driver->get(cpu);
1301 
1302 	if (ret_freq && policy->cur &&
1303 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1304 		/* verify no discrepancy between actual and
1305 					saved value exists */
1306 		if (unlikely(ret_freq != policy->cur)) {
1307 			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1308 			schedule_work(&policy->update);
1309 		}
1310 	}
1311 
1312 	return ret_freq;
1313 }
1314 
1315 /**
1316  * cpufreq_get - get the current CPU frequency (in kHz)
1317  * @cpu: CPU number
1318  *
1319  * Get the current (static) CPU frequency
1320  */
1321 unsigned int cpufreq_get(unsigned int cpu)
1322 {
1323 	unsigned int ret_freq = 0;
1324 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1325 
1326 	if (!policy)
1327 		goto out;
1328 
1329 	if (unlikely(lock_policy_rwsem_read(cpu)))
1330 		goto out_policy;
1331 
1332 	ret_freq = __cpufreq_get(cpu);
1333 
1334 	unlock_policy_rwsem_read(cpu);
1335 
1336 out_policy:
1337 	cpufreq_cpu_put(policy);
1338 out:
1339 	return ret_freq;
1340 }
1341 EXPORT_SYMBOL(cpufreq_get);
1342 
1343 
1344 /**
1345  *	cpufreq_suspend - let the low level driver prepare for suspend
1346  */
1347 
1348 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1349 {
1350 	int ret = 0;
1351 
1352 	int cpu = sysdev->id;
1353 	struct cpufreq_policy *cpu_policy;
1354 
1355 	dprintk("suspending cpu %u\n", cpu);
1356 
1357 	if (!cpu_online(cpu))
1358 		return 0;
1359 
1360 	/* we may be lax here as interrupts are off. Nonetheless
1361 	 * we need to grab the correct cpu policy, to check
1362 	 * whether we really run on this CPU.
1363 	 */
1364 
1365 	cpu_policy = cpufreq_cpu_get(cpu);
1366 	if (!cpu_policy)
1367 		return -EINVAL;
1368 
1369 	/* only handle each CPU group once */
1370 	if (unlikely(cpu_policy->cpu != cpu))
1371 		goto out;
1372 
1373 	if (cpufreq_driver->suspend) {
1374 		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
1375 		if (ret)
1376 			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1377 					"step on CPU %u\n", cpu_policy->cpu);
1378 	}
1379 
1380 out:
1381 	cpufreq_cpu_put(cpu_policy);
1382 	return ret;
1383 }
1384 
1385 /**
1386  *	cpufreq_resume -  restore proper CPU frequency handling after resume
1387  *
1388  *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1389  *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1390  *	    restored. It will verify that the current freq is in sync with
1391  *	    what we believe it to be. This is a bit later than when it
1392  *	    should be, but nonetheless it's better than calling
1393  *	    cpufreq_driver->get() here which might re-enable interrupts...
1394  */
1395 static int cpufreq_resume(struct sys_device *sysdev)
1396 {
1397 	int ret = 0;
1398 
1399 	int cpu = sysdev->id;
1400 	struct cpufreq_policy *cpu_policy;
1401 
1402 	dprintk("resuming cpu %u\n", cpu);
1403 
1404 	if (!cpu_online(cpu))
1405 		return 0;
1406 
1407 	/* we may be lax here as interrupts are off. Nonetheless
1408 	 * we need to grab the correct cpu policy, to check
1409 	 * whether we really run on this CPU.
1410 	 */
1411 
1412 	cpu_policy = cpufreq_cpu_get(cpu);
1413 	if (!cpu_policy)
1414 		return -EINVAL;
1415 
1416 	/* only handle each CPU group once */
1417 	if (unlikely(cpu_policy->cpu != cpu))
1418 		goto fail;
1419 
1420 	if (cpufreq_driver->resume) {
1421 		ret = cpufreq_driver->resume(cpu_policy);
1422 		if (ret) {
1423 			printk(KERN_ERR "cpufreq: resume failed in ->resume "
1424 					"step on CPU %u\n", cpu_policy->cpu);
1425 			goto fail;
1426 		}
1427 	}
1428 
1429 	schedule_work(&cpu_policy->update);
1430 
1431 fail:
1432 	cpufreq_cpu_put(cpu_policy);
1433 	return ret;
1434 }
1435 
1436 static struct sysdev_driver cpufreq_sysdev_driver = {
1437 	.add		= cpufreq_add_dev,
1438 	.remove		= cpufreq_remove_dev,
1439 	.suspend	= cpufreq_suspend,
1440 	.resume		= cpufreq_resume,
1441 };
1442 
1443 
1444 /*********************************************************************
1445  *                     NOTIFIER LISTS INTERFACE                      *
1446  *********************************************************************/
1447 
1448 /**
1449  *	cpufreq_register_notifier - register a driver with cpufreq
1450  *	@nb: notifier function to register
1451  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1452  *
1453  *	Add a driver to one of two lists: either a list of drivers that
1454  *      are notified about clock rate changes (once before and once after
1455  *      the transition), or a list of drivers that are notified about
1456  *      changes in cpufreq policy.
1457  *
1458  *	This function may sleep, and has the same return conditions as
1459  *	blocking_notifier_chain_register.
1460  */
1461 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1462 {
1463 	int ret;
1464 
1465 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1466 
1467 	switch (list) {
1468 	case CPUFREQ_TRANSITION_NOTIFIER:
1469 		ret = srcu_notifier_chain_register(
1470 				&cpufreq_transition_notifier_list, nb);
1471 		break;
1472 	case CPUFREQ_POLICY_NOTIFIER:
1473 		ret = blocking_notifier_chain_register(
1474 				&cpufreq_policy_notifier_list, nb);
1475 		break;
1476 	default:
1477 		ret = -EINVAL;
1478 	}
1479 
1480 	return ret;
1481 }
1482 EXPORT_SYMBOL(cpufreq_register_notifier);
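
/*
 * Illustrative sketch of a transition notifier user (hypothetical names,
 * not part of this file).  The callback gets a struct cpufreq_freqs
 * pointer and is invoked once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE around each frequency change:
 *
 *	static int my_freq_notify(struct notifier_block *nb,
 *				  unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			printk(KERN_DEBUG "cpu%u: %u -> %u kHz\n",
 *			       freqs->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_freq_nb = {
 *		.notifier_call = my_freq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */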
1483 
1484 
1485 /**
1486  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
1487  *	@nb: notifier block to be unregistered
1488  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1489  *
1490  *	Remove a driver from the CPU frequency notifier list.
1491  *
1492  *	This function may sleep, and has the same return conditions as
1493  *	blocking_notifier_chain_unregister.
1494  */
1495 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1496 {
1497 	int ret;
1498 
1499 	switch (list) {
1500 	case CPUFREQ_TRANSITION_NOTIFIER:
1501 		ret = srcu_notifier_chain_unregister(
1502 				&cpufreq_transition_notifier_list, nb);
1503 		break;
1504 	case CPUFREQ_POLICY_NOTIFIER:
1505 		ret = blocking_notifier_chain_unregister(
1506 				&cpufreq_policy_notifier_list, nb);
1507 		break;
1508 	default:
1509 		ret = -EINVAL;
1510 	}
1511 
1512 	return ret;
1513 }
1514 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1515 
1516 
1517 /*********************************************************************
1518  *                              GOVERNORS                            *
1519  *********************************************************************/
1520 
1521 
1522 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1523 			    unsigned int target_freq,
1524 			    unsigned int relation)
1525 {
1526 	int retval = -EINVAL;
1527 
1528 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1529 		target_freq, relation);
1530 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
1531 		retval = cpufreq_driver->target(policy, target_freq, relation);
1532 
1533 	return retval;
1534 }
1535 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1536 
1537 int cpufreq_driver_target(struct cpufreq_policy *policy,
1538 			  unsigned int target_freq,
1539 			  unsigned int relation)
1540 {
1541 	int ret = -EINVAL;
1542 
1543 	policy = cpufreq_cpu_get(policy->cpu);
1544 	if (!policy)
1545 		goto no_policy;
1546 
1547 	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1548 		goto fail;
1549 
1550 	ret = __cpufreq_driver_target(policy, target_freq, relation);
1551 
1552 	unlock_policy_rwsem_write(policy->cpu);
1553 
1554 fail:
1555 	cpufreq_cpu_put(policy);
1556 no_policy:
1557 	return ret;
1558 }
1559 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
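
/*
 * Illustrative sketch of a caller (hypothetical values, not part of this
 * file): a governor asking for the lowest frequency at or above its
 * computed target, clamped to the policy limits.
 *
 *	unsigned int next_freq = 800000;	(kHz, example value)
 *
 *	if (next_freq > policy->max)
 *		next_freq = policy->max;
 *	cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
 */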
1560 
1561 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1562 {
1563 	int ret = 0;
1564 
1565 	policy = cpufreq_cpu_get(policy->cpu);
1566 	if (!policy)
1567 		return -EINVAL;
1568 
1569 	if (cpu_online(cpu) && cpufreq_driver->getavg)
1570 		ret = cpufreq_driver->getavg(policy, cpu);
1571 
1572 	cpufreq_cpu_put(policy);
1573 	return ret;
1574 }
1575 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1576 
1577 /*
1578  * when "event" is CPUFREQ_GOV_LIMITS
1579  */
1580 
1581 static int __cpufreq_governor(struct cpufreq_policy *policy,
1582 					unsigned int event)
1583 {
1584 	int ret;
1585 
1586 	/* This only needs to be defined when the default governor is known to
1587 	   have latency restrictions, e.g. conservative or ondemand.
1588 	   That this is the case is already ensured in Kconfig.
1589 	*/
1590 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1591 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
1592 #else
1593 	struct cpufreq_governor *gov = NULL;
1594 #endif
1595 
1596 	if (policy->governor->max_transition_latency &&
1597 	    policy->cpuinfo.transition_latency >
1598 	    policy->governor->max_transition_latency) {
1599 		if (!gov)
1600 			return -EINVAL;
1601 		else {
1602 			printk(KERN_WARNING "%s governor failed, too long"
1603 			       " transition latency of HW, fallback"
1604 			       " to %s governor\n",
1605 			       policy->governor->name,
1606 			       gov->name);
1607 			policy->governor = gov;
1608 		}
1609 	}
1610 
1611 	if (!try_module_get(policy->governor->owner))
1612 		return -EINVAL;
1613 
1614 	dprintk("__cpufreq_governor for CPU %u, event %u\n",
1615 						policy->cpu, event);
1616 	ret = policy->governor->governor(policy, event);
1617 
1618 	/* we keep one module reference alive for
1619 	 * each CPU governed by this governor */
1620 	if ((event != CPUFREQ_GOV_START) || ret)
1621 		module_put(policy->governor->owner);
1622 	if ((event == CPUFREQ_GOV_STOP) && !ret)
1623 		module_put(policy->governor->owner);
1624 
1625 	return ret;
1626 }
1627 
1628 
1629 int cpufreq_register_governor(struct cpufreq_governor *governor)
1630 {
1631 	int err;
1632 
1633 	if (!governor)
1634 		return -EINVAL;
1635 
1636 	mutex_lock(&cpufreq_governor_mutex);
1637 
1638 	err = -EBUSY;
1639 	if (__find_governor(governor->name) == NULL) {
1640 		err = 0;
1641 		list_add(&governor->governor_list, &cpufreq_governor_list);
1642 	}
1643 
1644 	mutex_unlock(&cpufreq_governor_mutex);
1645 	return err;
1646 }
1647 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
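
/*
 * Illustrative sketch of a minimal governor registration (hypothetical
 * names, not part of this file).  The ->governor() callback is invoked by
 * __cpufreq_governor() with CPUFREQ_GOV_START/STOP/LIMITS events:
 *
 *	static int my_gov_handler(struct cpufreq_policy *policy,
 *				  unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name		= "mygov",
 *		.governor	= my_gov_handler,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_governor);
 */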
1648 
1649 
1650 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1651 {
1652 #ifdef CONFIG_HOTPLUG_CPU
1653 	int cpu;
1654 #endif
1655 
1656 	if (!governor)
1657 		return;
1658 
1659 #ifdef CONFIG_HOTPLUG_CPU
1660 	for_each_present_cpu(cpu) {
1661 		if (cpu_online(cpu))
1662 			continue;
1663 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1664 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1665 	}
1666 #endif
1667 
1668 	mutex_lock(&cpufreq_governor_mutex);
1669 	list_del(&governor->governor_list);
1670 	mutex_unlock(&cpufreq_governor_mutex);
1671 	return;
1672 }
1673 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1674 
1675 
1676 
1677 /*********************************************************************
1678  *                          POLICY INTERFACE                         *
1679  *********************************************************************/
1680 
1681 /**
1682  * cpufreq_get_policy - get the current cpufreq_policy
1683  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1684  *	is written
1685  *
1686  * Reads the current cpufreq policy.
1687  */
1688 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1689 {
1690 	struct cpufreq_policy *cpu_policy;
1691 	if (!policy)
1692 		return -EINVAL;
1693 
1694 	cpu_policy = cpufreq_cpu_get(cpu);
1695 	if (!cpu_policy)
1696 		return -EINVAL;
1697 
1698 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1699 
1700 	cpufreq_cpu_put(cpu_policy);
1701 	return 0;
1702 }
1703 EXPORT_SYMBOL(cpufreq_get_policy);
1704 
1705 
1706 /*
1707  * data   : current policy.
1708  * policy : policy to be set.
1709  */
1710 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1711 				struct cpufreq_policy *policy)
1712 {
1713 	int ret = 0;
1714 
1715 	cpufreq_debug_disable_ratelimit();
1716 	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1717 		policy->min, policy->max);
1718 
1719 	memcpy(&policy->cpuinfo, &data->cpuinfo,
1720 				sizeof(struct cpufreq_cpuinfo));
1721 
1722 	if (policy->min > data->max || policy->max < data->min) {
1723 		ret = -EINVAL;
1724 		goto error_out;
1725 	}
1726 
1727 	/* verify the cpu speed can be set within this limit */
1728 	ret = cpufreq_driver->verify(policy);
1729 	if (ret)
1730 		goto error_out;
1731 
1732 	/* adjust if necessary - all reasons */
1733 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1734 			CPUFREQ_ADJUST, policy);
1735 
1736 	/* adjust if necessary - hardware incompatibility*/
1737 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1738 			CPUFREQ_INCOMPATIBLE, policy);
1739 
1740 	/* verify the cpu speed can be set within this limit,
1741 	   which might be different to the first one */
1742 	ret = cpufreq_driver->verify(policy);
1743 	if (ret)
1744 		goto error_out;
1745 
1746 	/* notification of the new policy */
1747 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1748 			CPUFREQ_NOTIFY, policy);
1749 
1750 	data->min = policy->min;
1751 	data->max = policy->max;
1752 
1753 	dprintk("new min and max freqs are %u - %u kHz\n",
1754 					data->min, data->max);
1755 
1756 	if (cpufreq_driver->setpolicy) {
1757 		data->policy = policy->policy;
1758 		dprintk("setting range\n");
1759 		ret = cpufreq_driver->setpolicy(policy);
1760 	} else {
1761 		if (policy->governor != data->governor) {
1762 			/* save old, working values */
1763 			struct cpufreq_governor *old_gov = data->governor;
1764 
1765 			dprintk("governor switch\n");
1766 
1767 			/* end old governor */
1768 			if (data->governor) {
1769 				/*
1770 				 * Need to release the rwsem around governor
1771 				 * stop due to lock dependency between
1772 				 * cancel_delayed_work_sync and the read lock
1773 				 * taken in the delayed work handler.
1774 				 */
1775 				unlock_policy_rwsem_write(data->cpu);
1776 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
1777 				lock_policy_rwsem_write(data->cpu);
1778 			}
1779 
1780 			/* start new governor */
1781 			data->governor = policy->governor;
1782 			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1783 				/* new governor failed, so re-start old one */
1784 				dprintk("starting governor %s failed\n",
1785 							data->governor->name);
1786 				if (old_gov) {
1787 					data->governor = old_gov;
1788 					__cpufreq_governor(data,
1789 							   CPUFREQ_GOV_START);
1790 				}
1791 				ret = -EINVAL;
1792 				goto error_out;
1793 			}
1794 			/* might be a policy change, too, so fall through */
1795 		}
1796 		dprintk("governor: change or update limits\n");
1797 		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1798 	}
1799 
1800 error_out:
1801 	cpufreq_debug_enable_ratelimit();
1802 	return ret;
1803 }
1804 
1805 /**
1806  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
1807  *	@cpu: CPU which shall be re-evaluated
1808  *
1809  *	Useful for policy notifiers which have different necessities
1810  *	at different times.
1811  */
1812 int cpufreq_update_policy(unsigned int cpu)
1813 {
1814 	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1815 	struct cpufreq_policy policy;
1816 	int ret;
1817 
1818 	if (!data) {
1819 		ret = -ENODEV;
1820 		goto no_policy;
1821 	}
1822 
1823 	if (unlikely(lock_policy_rwsem_write(cpu))) {
1824 		ret = -EINVAL;
1825 		goto fail;
1826 	}
1827 
1828 	dprintk("updating policy for CPU %u\n", cpu);
1829 	memcpy(&policy, data, sizeof(struct cpufreq_policy));
1830 	policy.min = data->user_policy.min;
1831 	policy.max = data->user_policy.max;
1832 	policy.policy = data->user_policy.policy;
1833 	policy.governor = data->user_policy.governor;
1834 
1835 	/* BIOS might change freq behind our back
1836 	  -> ask driver for current freq and notify governors about a change */
1837 	if (cpufreq_driver->get) {
1838 		policy.cur = cpufreq_driver->get(cpu);
1839 		if (!data->cur) {
1840 			dprintk("Driver did not initialize current freq");
1841 			data->cur = policy.cur;
1842 		} else {
1843 			if (data->cur != policy.cur)
1844 				cpufreq_out_of_sync(cpu, data->cur,
1845 								policy.cur);
1846 		}
1847 	}
1848 
1849 	ret = __cpufreq_set_policy(data, &policy);
1850 
1851 	unlock_policy_rwsem_write(cpu);
1852 
1853 fail:
1854 	cpufreq_cpu_put(data);
1855 no_policy:
1856 	return ret;
1857 }
1858 EXPORT_SYMBOL(cpufreq_update_policy);
1859 
1860 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1861 					unsigned long action, void *hcpu)
1862 {
1863 	unsigned int cpu = (unsigned long)hcpu;
1864 	struct sys_device *sys_dev;
1865 
1866 	sys_dev = get_cpu_sysdev(cpu);
1867 	if (sys_dev) {
1868 		switch (action) {
1869 		case CPU_ONLINE:
1870 		case CPU_ONLINE_FROZEN:
1871 			cpufreq_add_dev(sys_dev);
1872 			break;
1873 		case CPU_DOWN_PREPARE:
1874 		case CPU_DOWN_PREPARE_FROZEN:
1875 			if (unlikely(lock_policy_rwsem_write(cpu)))
1876 				BUG();
1877 
1878 			__cpufreq_remove_dev(sys_dev);
1879 			break;
1880 		case CPU_DOWN_FAILED:
1881 		case CPU_DOWN_FAILED_FROZEN:
1882 			cpufreq_add_dev(sys_dev);
1883 			break;
1884 		}
1885 	}
1886 	return NOTIFY_OK;
1887 }
1888 
1889 static struct notifier_block __refdata cpufreq_cpu_notifier =
1890 {
1891     .notifier_call = cpufreq_cpu_callback,
1892 };
1893 
1894 /*********************************************************************
1895  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1896  *********************************************************************/
1897 
1898 /**
1899  * cpufreq_register_driver - register a CPU Frequency driver
1900  * @driver_data: A struct cpufreq_driver containing the values
1901  * submitted by the CPU Frequency driver.
1902  *
1903  *   Registers a CPU Frequency driver to this core code. This code
1904  * returns zero on success, -EBUSY when another driver got here first
1905  * (and isn't unregistered in the meantime).
1906  *
1907  */
1908 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1909 {
1910 	unsigned long flags;
1911 	int ret;
1912 
1913 	if (!driver_data || !driver_data->verify || !driver_data->init ||
1914 	    ((!driver_data->setpolicy) && (!driver_data->target)))
1915 		return -EINVAL;
1916 
1917 	dprintk("trying to register driver %s\n", driver_data->name);
1918 
1919 	if (driver_data->setpolicy)
1920 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
1921 
1922 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1923 	if (cpufreq_driver) {
1924 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1925 		return -EBUSY;
1926 	}
1927 	cpufreq_driver = driver_data;
1928 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1929 
1930 	ret = sysdev_driver_register(&cpu_sysdev_class,
1931 					&cpufreq_sysdev_driver);
1932 
1933 	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1934 		int i;
1935 		ret = -ENODEV;
1936 
1937 		/* check for at least one working CPU */
1938 		for (i = 0; i < nr_cpu_ids; i++)
1939 			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1940 				ret = 0;
1941 				break;
1942 			}
1943 
1944 		/* if all ->init() calls failed, unregister */
1945 		if (ret) {
1946 			dprintk("no CPU initialized for driver %s\n",
1947 							driver_data->name);
1948 			sysdev_driver_unregister(&cpu_sysdev_class,
1949 						&cpufreq_sysdev_driver);
1950 
1951 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
1952 			cpufreq_driver = NULL;
1953 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1954 		}
1955 	}
1956 
1957 	if (!ret) {
1958 		register_hotcpu_notifier(&cpufreq_cpu_notifier);
1959 		dprintk("driver %s up and running\n", driver_data->name);
1960 		cpufreq_debug_enable_ratelimit();
1961 	}
1962 
1963 	return ret;
1964 }
1965 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
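
/*
 * Illustrative sketch of a minimal ->target style driver registration
 * (hypothetical names, not part of this file).  ->init() is expected to
 * fill in policy->cpuinfo and the initial policy->min/max/cur; ->verify()
 * typically clamps the request with cpufreq_verify_within_limits():
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name	= "mydrv",
 *		.owner	= THIS_MODULE,
 *		.init	= my_cpu_init,
 *		.verify	= my_verify_policy,
 *		.target	= my_set_target,
 *		.get	= my_get_speed,
 *	};
 *
 *	ret = cpufreq_register_driver(&my_cpufreq_driver);
 */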
1966 
1967 
1968 /**
1969  * cpufreq_unregister_driver - unregister the current CPUFreq driver
1970  *
1971  *    Unregister the current CPUFreq driver. Only call this if you have
1972  * the right to do so, i.e. if you have succeeded in initialising before!
1973  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1974  * currently not initialised.
1975  */
1976 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1977 {
1978 	unsigned long flags;
1979 
1980 	cpufreq_debug_disable_ratelimit();
1981 
1982 	if (!cpufreq_driver || (driver != cpufreq_driver)) {
1983 		cpufreq_debug_enable_ratelimit();
1984 		return -EINVAL;
1985 	}
1986 
1987 	dprintk("unregistering driver %s\n", driver->name);
1988 
1989 	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1990 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1991 
1992 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1993 	cpufreq_driver = NULL;
1994 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1995 
1996 	return 0;
1997 }
1998 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1999 
2000 static int __init cpufreq_core_init(void)
2001 {
2002 	int cpu;
2003 
2004 	for_each_possible_cpu(cpu) {
2005 		per_cpu(cpufreq_policy_cpu, cpu) = -1;
2006 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2007 	}
2008 
2009 	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
2010 						&cpu_sysdev_class.kset.kobj);
2011 	BUG_ON(!cpufreq_global_kobject);
2012 
2013 	return 0;
2014 }
2015 core_initcall(cpufreq_core_init);
2016