cpufreq.c: diff between revisions 5b929bd11df23922daf1be5d52731cc3900c1d79 (old) and 0b275352872b2641ed5c94d0f0f8c7e907bf3e3f (new)
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>

--- 98 unchanged lines hidden ---

107/* Flag to suspend/resume CPUFreq governors */
108static bool cpufreq_suspended;
109
110static inline bool has_target(void)
111{
112 return cpufreq_driver->target_index || cpufreq_driver->target;
113}
114
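has_target() is how the core tells the two driver flavours apart: drivers that jump between discrete frequencies through ->target()/->target_index() (and therefore need a governor), and ->setpolicy() drivers that manage frequency themselves. A minimal sketch of the two shapes; the driver names and callbacks are hypothetical, only the field names come from struct cpufreq_driver:

static struct cpufreq_driver example_table_driver = {
	.name		= "example-table",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,	/* has_target() -> true */
};

static struct cpufreq_driver example_setpolicy_driver = {
	.name		= "example-setpolicy",
	.setpolicy	= example_set_policy,	/* has_target() -> false */
};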
115/*
116 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
117 * sections
118 */
119static DECLARE_RWSEM(cpufreq_rwsem);
120
121/* internal prototypes */
122static int __cpufreq_governor(struct cpufreq_policy *policy,
123 unsigned int event);
124static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
125static void handle_update(struct work_struct *work);
126
127/**
128 * Two notifier lists: the "policy" list is involved in the

--- 143 unchanged lines hidden ---

272 * @cpu: cpu to find policy for.
273 *
274 * This returns policy for 'cpu', returns NULL if it doesn't exist.
275 * It also increments the kobject reference count to mark it busy and so would
276 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
277 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
278 * freed as that depends on the kobj count.
279 *
280 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
281 * valid policy is found. This is done to make sure the driver doesn't get
282 * unregistered while the policy is being used.
283 *
284 * Return: A valid policy on success, otherwise NULL on failure.
285 */
286struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
287{
288 struct cpufreq_policy *policy = NULL;
289 unsigned long flags;
290
291 if (WARN_ON(cpu >= nr_cpu_ids))
292 return NULL;
293
294 if (!down_read_trylock(&cpufreq_rwsem))
295 return NULL;
296
297 /* get the cpufreq driver */
298 read_lock_irqsave(&cpufreq_driver_lock, flags);
299
300 if (cpufreq_driver) {
301 /* get the CPU */
302 policy = cpufreq_cpu_get_raw(cpu);
303 if (policy)
304 kobject_get(&policy->kobj);
305 }
306
307 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
308
309 if (!policy)
310 up_read(&cpufreq_rwsem);
311
312 return policy;
313}
314EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
315
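The usual caller pairs the two helpers above; a minimal sketch of the intended usage (the query function itself is hypothetical):

static unsigned int example_query_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (policy) {
		cur = policy->cur;
		/* drops the kobj ref; in the old revision also cpufreq_rwsem */
		cpufreq_cpu_put(policy);
	}
	return cur;
}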
316/**
317 * cpufreq_cpu_put: Decrements the usage count of a policy
318 *
319 * @policy: policy earlier returned by cpufreq_cpu_get().
320 *
321 * This decrements the kobject reference count incremented earlier by calling
322 * cpufreq_cpu_get().
323 *
324 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
325 */
326void cpufreq_cpu_put(struct cpufreq_policy *policy)
327{
328 kobject_put(&policy->kobj);
329 up_read(&cpufreq_rwsem);
330}
331EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
332
333/*********************************************************************
334 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
335 *********************************************************************/
336
337/**

--- 508 unchanged lines hidden ---

846#define to_attr(a) container_of(a, struct freq_attr, attr)
847
848static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
849{
850 struct cpufreq_policy *policy = to_policy(kobj);
851 struct freq_attr *fattr = to_attr(attr);
852 ssize_t ret;
853
854 if (!down_read_trylock(&cpufreq_rwsem))
855 return -EINVAL;
856
857 down_read(&policy->rwsem);
858
859 if (fattr->show)
860 ret = fattr->show(policy, buf);
861 else
862 ret = -EIO;
863
864 up_read(&policy->rwsem);
865 up_read(&cpufreq_rwsem);
866
867 return ret;
868}
869
870static ssize_t store(struct kobject *kobj, struct attribute *attr,
871 const char *buf, size_t count)
872{
873 struct cpufreq_policy *policy = to_policy(kobj);
874 struct freq_attr *fattr = to_attr(attr);
875 ssize_t ret = -EINVAL;
876
877 get_online_cpus();
878
879 if (!cpu_online(policy->cpu))
880 goto unlock;
881
882 if (!down_read_trylock(&cpufreq_rwsem))
883 goto unlock;
884
885 down_write(&policy->rwsem);
886
887 /* Updating inactive policies is invalid, so avoid doing that. */
888 if (unlikely(policy_is_inactive(policy))) {
889 ret = -EBUSY;
890 goto unlock_policy_rwsem;
891 }
892
893 if (fattr->store)
894 ret = fattr->store(policy, buf, count);
895 else
896 ret = -EIO;
897
898unlock_policy_rwsem:
899 up_write(&policy->rwsem);
900
901 up_read(&cpufreq_rwsem);
902unlock:
903 put_online_cpus();
904
905 return ret;
906}
907
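show()/store() only dispatch; the real handlers live in freq_attr entries hooked to the same kobject type. A sketch of a read-only attribute following that pattern — an assumed example of the wiring, not a quote of this file:

static ssize_t show_cur_freq_example(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cur);
}

static struct freq_attr cur_freq_example =
	__ATTR(cur_freq_example, 0444, show_cur_freq_example, NULL);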
908static void cpufreq_sysfs_release(struct kobject *kobj)
909{

--- 87 unchanged lines hidden ---

997
998/* Add/remove symlinks for all related CPUs */
999static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1000{
1001 unsigned int j;
1002 int ret = 0;
1003
1004 /* Some related CPUs might not be present (physically hotplugged) */
1005 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
977 for_each_cpu(j, policy->real_cpus) {
1006 if (j == policy->kobj_cpu)
1007 continue;
1008
1009 ret = add_cpu_dev_symlink(policy, j);
1010 if (ret)
1011 break;
1012 }
1013
1014 return ret;
1015}
1016
1017static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1018{
1019 unsigned int j;
1020
1021 /* Some related CPUs might not be present (physically hotplugged) */
1022 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
994 for_each_cpu(j, policy->real_cpus) {
1023 if (j == policy->kobj_cpu)
1024 continue;
1025
1026 remove_cpu_dev_symlink(policy, j);
1027 }
1028}
1029
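Both walkers lean on an add_cpu_dev_symlink()/remove_cpu_dev_symlink() pair defined outside this hunk, presumably thin wrappers around sysfs_create_link()/sysfs_remove_link(). A sketch of the add side under that assumption:

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return -ENODEV;
	/* /sys/devices/system/cpu/cpuN/cpufreq -> ../cpuM/cpufreq */
	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

Only policy->kobj_cpu owns the real cpufreq directory; every other CPU in the mask gets a symlink to it, which is why both loops skip kobj_cpu.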
1030static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
1031 struct device *dev)
1002static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1032{
1033 struct freq_attr **drv_attr;
1034 int ret = 0;
1035
1036 /* set up files for this cpu device */
1037 drv_attr = cpufreq_driver->attr;
1038 while (drv_attr && *drv_attr) {
1039 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));

--- 15 unchanged lines hidden ---

1055 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1056 if (ret)
1057 return ret;
1058 }
1059
1060 return cpufreq_add_dev_symlink(policy);
1061}
1062
1063static void cpufreq_init_policy(struct cpufreq_policy *policy)
1034static int cpufreq_init_policy(struct cpufreq_policy *policy)
1064{
1065 struct cpufreq_governor *gov = NULL;
1066 struct cpufreq_policy new_policy;
1067 int ret = 0;
1068
1069 memcpy(&new_policy, policy, sizeof(*policy));
1070
1071 /* Update governor of new_policy to the governor used before hotplug */
1072 gov = find_governor(policy->last_governor);
1073 if (gov)
1074 pr_debug("Restoring governor %s for cpu %d\n",
1075 policy->governor->name, policy->cpu);
1076 else
1077 gov = CPUFREQ_DEFAULT_GOVERNOR;
1078
1079 new_policy.governor = gov;
1080
1081 /* Use the default policy if it's valid. */
1082 if (cpufreq_driver->setpolicy)
1083 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1084
1085 /* set default policy */
1086 ret = cpufreq_set_policy(policy, &new_policy);
1087 if (ret) {
1088 pr_debug("setting policy failed\n");
1089 if (cpufreq_driver->exit)
1090 cpufreq_driver->exit(policy);
1091 }
1056 return cpufreq_set_policy(policy, &new_policy);
1092}
1093
1094static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
1095 unsigned int cpu, struct device *dev)
1059static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1096{
1097 int ret = 0;
1098
1099 /* Has this CPU been taken care of already? */
1100 if (cpumask_test_cpu(cpu, policy->cpus))
1101 return 0;
1102
1103 if (has_target()) {

--- 17 unchanged lines hidden ---

1121 pr_err("%s: Failed to start governor\n", __func__);
1122 return ret;
1123 }
1124 }
1125
1126 return 0;
1127}
1128
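The hidden body above joins 'cpu' to an already-initialized policy. The general shape of such an update — stop the governor, edit policy->cpus under the rwsem, restart — is sketched below under that assumption; this is the pattern, not the hidden code verbatim:

	ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
	if (ret)
		return ret;

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);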
1129static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1093static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1130{
1095 struct device *dev = get_cpu_device(cpu);
1131 struct cpufreq_policy *policy;
1132 unsigned long flags;
1133
1134 read_lock_irqsave(&cpufreq_driver_lock, flags);
1135 policy = per_cpu(cpufreq_cpu_data, cpu);
1136 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1137
1138 if (likely(policy)) {
1139 /* Policy should be inactive here */
1140 WARN_ON(!policy_is_inactive(policy));
1141
1142 down_write(&policy->rwsem);
1143 policy->cpu = cpu;
1144 policy->governor = NULL;
1145 up_write(&policy->rwsem);
1146 }
1147
1148 return policy;
1149}
1150
1151static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1152{
1153 struct cpufreq_policy *policy;
1154 int ret;
1155
1099 if (WARN_ON(!dev))
1100 return NULL;
1101
1156 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1157 if (!policy)
1158 return NULL;
1159
1160 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1161 goto err_free_policy;
1162
1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164 goto err_free_cpumask;
1165
1112 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1113 goto err_free_rcpumask;
1114
1166 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1167 "cpufreq");
1168 if (ret) {
1169 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1170 goto err_free_rcpumask;
1119 goto err_free_real_cpus;
1171 }
1172
1173 INIT_LIST_HEAD(&policy->policy_list);
1174 init_rwsem(&policy->rwsem);
1175 spin_lock_init(&policy->transition_lock);
1176 init_waitqueue_head(&policy->transition_wait);
1177 init_completion(&policy->kobj_unregister);
1178 INIT_WORK(&policy->update, handle_update);
1179
1180 policy->cpu = dev->id;
1129 policy->cpu = cpu;
1181
1182 /* Set this once on allocation */
1183 policy->kobj_cpu = dev->id;
1132 policy->kobj_cpu = cpu;
1184
1185 return policy;
1186
1136err_free_real_cpus:
1137 free_cpumask_var(policy->real_cpus);
1187err_free_rcpumask:
1188 free_cpumask_var(policy->related_cpus);
1189err_free_cpumask:
1190 free_cpumask_var(policy->cpus);
1191err_free_policy:
1192 kfree(policy);
1193
1194 return NULL;

--- 34 unchanged lines hidden ---

1229 write_lock_irqsave(&cpufreq_driver_lock, flags);
1230 list_del(&policy->policy_list);
1231
1232 for_each_cpu(cpu, policy->related_cpus)
1233 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1234 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1235
1236 cpufreq_policy_put_kobj(policy, notify);
1188 free_cpumask_var(policy->real_cpus);
1237 free_cpumask_var(policy->related_cpus);
1238 free_cpumask_var(policy->cpus);
1239 kfree(policy);
1240}
1241
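The alloc/free pair above follows the kernel's goto-unwind idiom: each allocation gets an error label, and a failure jumps to the label that frees everything allocated so far, in reverse order. A self-contained sketch with a hypothetical structure:

struct example_state {
	cpumask_var_t mask;
};

static struct example_state *example_alloc(void)
{
	struct example_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;
	if (!zalloc_cpumask_var(&s->mask, GFP_KERNEL))
		goto err_free_state;	/* mask alloc failed: only 's' to undo */
	return s;

err_free_state:
	kfree(s);
	return NULL;
}

The matching teardown runs the frees in the opposite order, free_cpumask_var() before kfree(), exactly as cpufreq_policy_free() does.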
1242/**
1243 * cpufreq_add_dev - add a CPU device
1244 *
1245 * Adds the cpufreq interface for a CPU device.
1246 *
1247 * The Oracle says: try running cpufreq registration/unregistration concurrently
1248 * with cpu hotplugging and all hell will break loose. Tried to clean this
1249 * mess up, but more thorough testing is needed. - Mathieu
1250 */
1251static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1194static int cpufreq_online(unsigned int cpu)
1252{
1253 unsigned int j, cpu = dev->id;
1254 int ret = -ENOMEM;
1255 struct cpufreq_policy *policy;
1197 bool recover_policy;
1256 unsigned long flags;
1257 bool recover_policy = !sif;
1199 unsigned int j;
1200 int ret;
1258
1259 pr_debug("adding CPU %u\n", cpu);
1202 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1260
1261 /*
1262 * Only possible if 'cpu' wasn't physically present earlier and we are
1263 * here from subsys_interface add callback. A hotplug notifier will
1264 * follow and we will handle it like logical CPU hotplug then. For now,
1265 * just create the sysfs link.
1266 */
1267 if (cpu_is_offline(cpu))
1268 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
1269
1270 if (!down_read_trylock(&cpufreq_rwsem))
1271 return 0;
1272
1273 /* Check if this CPU already has a policy to manage it */
1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 if (policy && !policy_is_inactive(policy)) {
1206 if (policy) {
1276 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1277 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1278 up_read(&cpufreq_rwsem);
1279 return ret;
1280 }
1208 if (!policy_is_inactive(policy))
1209 return cpufreq_add_policy_cpu(policy, cpu);
1281
1210
1282 /*
1283 * Restore the saved policy when doing light-weight init and fall back
1284 * to the full init if that fails.
1285 */
1286 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1287 if (!policy) {
1211 /* This is the only online CPU for the policy. Start over. */
1212 recover_policy = true;
1213 down_write(&policy->rwsem);
1214 policy->cpu = cpu;
1215 policy->governor = NULL;
1216 up_write(&policy->rwsem);
1217 } else {
1288 recover_policy = false;
1289 policy = cpufreq_policy_alloc(dev);
1219 policy = cpufreq_policy_alloc(cpu);
1290 if (!policy)
1291 goto nomem_out;
1221 return -ENOMEM;
1292 }
1293
1294 cpumask_copy(policy->cpus, cpumask_of(cpu));
1295
1296 /* call driver. From then on the cpufreq must be able
1297 * to accept all calls to ->verify and ->setpolicy for this CPU
1298 */
1299 ret = cpufreq_driver->init(policy);
1300 if (ret) {
1301 pr_debug("initialization failed\n");
1302 goto err_set_policy_cpu;
1232 goto out_free_policy;
1303 }
1304
1305 down_write(&policy->rwsem);
1306
1307 /* related cpus should atleast have policy->cpus */
1308 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1237 if (!recover_policy) {
1238 /* related_cpus should at least include policy->cpus. */
1239 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1240 /* Remember CPUs present at the policy creation time. */
1241 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1242 }
1309
1310 /*
1311 * affected cpus must always be the one, which are online. We aren't
1312 * managing offline cpus here.
1313 */
1314 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1315
1316 if (!recover_policy) {

--- 5 unchanged lines hidden ---

1322 per_cpu(cpufreq_cpu_data, j) = policy;
1323 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1324 }
1325
1326 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1327 policy->cur = cpufreq_driver->get(policy->cpu);
1328 if (!policy->cur) {
1329 pr_err("%s: ->get() failed\n", __func__);
1330 goto err_get_freq;
1264 goto out_exit_policy;
1331 }
1332 }
1333
1334 /*
1335 * Sometimes boot loaders set CPU frequency to a value outside of
1336 * frequency table present with cpufreq core. In such cases CPU might be
1337 * unstable if it has to run on that frequency for long duration of time
1338 * and so it's better to set it to a frequency which is specified in

--- 31 unchanged lines hidden (view full) ---

1370 __func__, policy->cpu, policy->cur);
1371 }
1372 }
1373
1374 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1375 CPUFREQ_START, policy);
1376
1377 if (!recover_policy) {
1378 ret = cpufreq_add_dev_interface(policy, dev);
1312 ret = cpufreq_add_dev_interface(policy);
1379 if (ret)
1380 goto err_out_unregister;
1314 goto out_exit_policy;
1381 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1382 CPUFREQ_CREATE_POLICY, policy);
1383
1384 write_lock_irqsave(&cpufreq_driver_lock, flags);
1385 list_add(&policy->policy_list, &cpufreq_policy_list);
1386 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1387 }
1388
1389 cpufreq_init_policy(policy);
1323 ret = cpufreq_init_policy(policy);
1324 if (ret) {
1325 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1326 __func__, cpu, ret);
1327 goto out_remove_policy_notify;
1328 }
1390
1391 if (!recover_policy) {
1392 policy->user_policy.policy = policy->policy;
1393 policy->user_policy.governor = policy->governor;
1394 }
1395 up_write(&policy->rwsem);
1396
1397 kobject_uevent(&policy->kobj, KOBJ_ADD);
1398
1399 up_read(&cpufreq_rwsem);
1400
1401 /* Callback for handling stuff after policy is ready */
1402 if (cpufreq_driver->ready)
1403 cpufreq_driver->ready(policy);
1404
1405 pr_debug("initialization complete\n");
1406
1407 return 0;
1408
1409err_out_unregister:
1410err_get_freq:
1346out_remove_policy_notify:
1347 /* cpufreq_policy_free() will notify based on this */
1348 recover_policy = true;
1349out_exit_policy:
1411 up_write(&policy->rwsem);
1412
1413 if (cpufreq_driver->exit)
1414 cpufreq_driver->exit(policy);
1415err_set_policy_cpu:
1354out_free_policy:
1416 cpufreq_policy_free(policy, recover_policy);
1417nomem_out:
1418 up_read(&cpufreq_rwsem);
1356 return ret;
1357}
1419
1358
1359/**
1360 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1361 * @dev: CPU device.
1362 * @sif: Subsystem interface structure pointer (not used)
1363 */
1364static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1365{
1366 unsigned cpu = dev->id;
1367 int ret;
1368
1369 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1370
1371 if (cpu_online(cpu)) {
1372 ret = cpufreq_online(cpu);
1373 } else {
1374 /*
1375 * A hotplug notifier will follow and we will handle it as CPU
1376 * online then. For now, just create the sysfs link, unless
1377 * there is no policy or the link is already present.
1378 */
1379 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1380
1381 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1382 ? add_cpu_dev_symlink(policy, cpu) : 0;
1383 }
1384
1385 return ret;
1386}
1387
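cpufreq_add_dev()/cpufreq_remove_dev() are driven by a subsys_interface on the CPU subsystem, registered during cpufreq_register_driver(); the wiring, as defined elsewhere in this file, looks like:

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};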
1423static int __cpufreq_remove_dev_prepare(struct device *dev,
1424 struct subsys_interface *sif)
1388static void cpufreq_offline_prepare(unsigned int cpu)
1425{
1426 unsigned int cpu = dev->id;
1427 int ret = 0;
1428 struct cpufreq_policy *policy;
1429
1430 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1431
1432 policy = cpufreq_cpu_get_raw(cpu);
1433 if (!policy) {
1434 pr_debug("%s: No cpu_data found\n", __func__);
1435 return -EINVAL;
1397 return;
1436 }
1437
1438 if (has_target()) {
1439 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1440 if (ret) {
1401 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1402 if (ret)
1441 pr_err("%s: Failed to stop governor\n", __func__);
1442 return ret;
1443 }
1444 }
1445
1446 down_write(&policy->rwsem);
1447 cpumask_clear_cpu(cpu, policy->cpus);
1448
1449 if (policy_is_inactive(policy)) {
1450 if (has_target())
1451 strncpy(policy->last_governor, policy->governor->name,
1452 CPUFREQ_NAME_LEN);
1453 } else if (cpu == policy->cpu) {
1454 /* Nominate new CPU */
1455 policy->cpu = cpumask_any(policy->cpus);
1456 }
1457 up_write(&policy->rwsem);
1458
1459 /* Start governor again for active policy */
1460 if (!policy_is_inactive(policy)) {
1461 if (has_target()) {
1462 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1422 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1463 if (!ret)
1464 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1465
1466 if (ret)
1467 pr_err("%s: Failed to start governor\n", __func__);
1468 }
1469 } else if (cpufreq_driver->stop_cpu) {
1470 cpufreq_driver->stop_cpu(policy);
1471 }
1472
1473 return ret;
1474}
1475
1476static int __cpufreq_remove_dev_finish(struct device *dev,
1477 struct subsys_interface *sif)
1434static void cpufreq_offline_finish(unsigned int cpu)
1478{
1479 unsigned int cpu = dev->id;
1480 int ret;
1481 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1482
1483 if (!policy) {
1484 pr_debug("%s: No cpu_data found\n", __func__);
1485 return -EINVAL;
1440 return;
1486 }
1487
1488 /* Only proceed for inactive policies */
1489 if (!policy_is_inactive(policy))
1490 return 0;
1445 return;
1491
1492 /* If cpu is last user of policy, free policy */
1493 if (has_target()) {
1494 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1495 if (ret) {
1449 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1450 if (ret)
1496 pr_err("%s: Failed to exit governor\n", __func__);
1497 return ret;
1498 }
1499 }
1500
1501 /*
1502 * Perform the ->exit() even during light-weight tear-down,
1503 * since this is a core component, and is essential for the
1504 * subsequent light-weight ->init() to succeed.
1505 */
1506 if (cpufreq_driver->exit)
1507 cpufreq_driver->exit(policy);
1508
1509 /* Free the policy only if the driver is getting removed. */
1510 if (sif)
1511 cpufreq_policy_free(policy, true);
1512
1513 return 0;
1514}
1515
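The prepare/finish split mirrors the two hotplug phases that call them (see the notifier callback near the end of this diff); roughly:

/*
 * CPU_DOWN_PREPARE -> cpufreq_offline_prepare(): stop the governor, drop
 *                     the CPU from policy->cpus, then restart the governor
 *                     or call ->stop_cpu() as appropriate.
 * CPU_POST_DEAD    -> cpufreq_offline_finish(): once the CPU is really gone,
 *                     POLICY_EXIT the governor and call the driver's ->exit()
 *                     so a later light-weight ->init() can succeed.
 */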
1516/**
1517 * cpufreq_remove_dev - remove a CPU device
1518 *
1519 * Removes the cpufreq interface for a CPU device.
1520 */
1521static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1522{
1523 unsigned int cpu = dev->id;
1524 int ret;
1471 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1525
1526 /*
1527 * Only possible if 'cpu' is getting physically removed now. A hotplug
1528 * notifier should have already been called and we just need to remove
1529 * link or free policy here.
1530 */
1531 if (cpu_is_offline(cpu)) {
1532 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1533 struct cpumask mask;
1473 if (!policy)
1474 return 0;
1534
1476 if (cpu_online(cpu)) {
1477 cpufreq_offline_prepare(cpu);
1478 cpufreq_offline_finish(cpu);
1479 }
1537
1538 cpumask_copy(&mask, policy->related_cpus);
1539 cpumask_clear_cpu(cpu, &mask);
1481 cpumask_clear_cpu(cpu, policy->real_cpus);
1540
1541 /*
1542 * Free policy only if all policy->related_cpus are removed
1543 * physically.
1544 */
1545 if (cpumask_intersects(&mask, cpu_present_mask)) {
1546 remove_cpu_dev_symlink(policy, cpu);
1547 return 0;
1548 }
1549
1483 if (cpumask_empty(policy->real_cpus)) {
1550 cpufreq_policy_free(policy, true);
1551 return 0;
1552 }
1553
1554 ret = __cpufreq_remove_dev_prepare(dev, sif);
1488 if (cpu != policy->kobj_cpu) {
1489 remove_cpu_dev_symlink(policy, cpu);
1490 } else {
1491 /*
1492 * The CPU owning the policy object is going away. Move it to
1493 * another suitable CPU.
1494 */
1495 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1496 struct device *new_dev = get_cpu_device(new_cpu);
1555
1556 if (!ret)
1557 ret = __cpufreq_remove_dev_finish(dev, sif);
1498 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
1558
1559 return ret;
1500 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1501 policy->kobj_cpu = new_cpu;
1502 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1503 }
1504
1505 return 0;
1560}
1561
1562static void handle_update(struct work_struct *work)
1563{
1564 struct cpufreq_policy *policy =
1565 container_of(work, struct cpufreq_policy, update);
1566 unsigned int cpu = policy->cpu;
1567 pr_debug("handle_update for cpu %u called\n", cpu);

--- 721 unchanged lines hidden ---

2289 goto out;
2290
2291 pr_debug("governor switch\n");
2292
2293 /* save old, working values */
2294 old_gov = policy->governor;
2295 /* end old governor */
2296 if (old_gov) {
2297 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2243 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2244 if (ret) {
2245 /* This can happen due to race with other operations */
2246 pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2247 __func__, old_gov->name, ret);
2248 return ret;
2249 }
2250
2298 up_write(&policy->rwsem);
2299 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2252 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2300 down_write(&policy->rwsem);
2254
2255 if (ret) {
2256 pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2257 __func__, old_gov->name, ret);
2258 return ret;
2259 }
2301 }
2302
2303 /* start new governor */
2304 policy->governor = new_policy->governor;
2305 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2306 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2264 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2265 if (!ret) {
2266 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2267 if (!ret)
2307 goto out;
2308
2309 up_write(&policy->rwsem);
2310 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2311 down_write(&policy->rwsem);
2312 }
2313
2314 /* new governor failed, so re-start old one */
2315 pr_debug("starting governor %s failed\n", policy->governor->name);
2316 if (old_gov) {
2317 policy->governor = old_gov;
2318 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2319 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2279 if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2280 policy->governor = NULL;
2281 else
2282 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2320 }
2321
2322 return -EINVAL;
2285 return ret;
2323
2324 out:
2325 pr_debug("governor: change or update limits\n");
2326 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2327}
2328
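Stripped of the rwsem juggling and the rollback path, the switch implemented above follows the governor lifecycle STOP -> POLICY_EXIT -> POLICY_INIT -> START -> LIMITS. A condensed sketch of the happy path (the helper name is hypothetical):

static int example_switch_governor(struct cpufreq_policy *policy,
				   struct cpufreq_governor *new_gov)
{
	int ret;

	ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
	if (ret)
		return ret;
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
	if (ret)
		return ret;

	policy->governor = new_gov;
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
	if (!ret)
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
	if (!ret)
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	return ret;
}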
2329/**
2330 * cpufreq_update_policy - re-evaluate an existing cpufreq policy

--- 49 unchanged lines hidden ---

2380 return ret;
2381}
2382EXPORT_SYMBOL(cpufreq_update_policy);
2383
2384static int cpufreq_cpu_callback(struct notifier_block *nfb,
2385 unsigned long action, void *hcpu)
2386{
2387 unsigned int cpu = (unsigned long)hcpu;
2388 struct device *dev;
2389
2390 dev = get_cpu_device(cpu);
2391 if (dev) {
2392 switch (action & ~CPU_TASKS_FROZEN) {
2393 case CPU_ONLINE:
2394 cpufreq_add_dev(dev, NULL);
2395 break;
2352 switch (action & ~CPU_TASKS_FROZEN) {
2353 case CPU_ONLINE:
2354 cpufreq_online(cpu);
2355 break;
2396
2397 case CPU_DOWN_PREPARE:
2398 __cpufreq_remove_dev_prepare(dev, NULL);
2399 break;
2357 case CPU_DOWN_PREPARE:
2358 cpufreq_offline_prepare(cpu);
2359 break;
2400
2401 case CPU_POST_DEAD:
2402 __cpufreq_remove_dev_finish(dev, NULL);
2403 break;
2361 case CPU_POST_DEAD:
2362 cpufreq_offline_finish(cpu);
2363 break;
2404
2405 case CPU_DOWN_FAILED:
2406 cpufreq_add_dev(dev, NULL);
2407 break;
2408 }
2365 case CPU_DOWN_FAILED:
2366 cpufreq_online(cpu);
2367 break;
2409 }
2410 return NOTIFY_OK;
2411}
2412
2413static struct notifier_block __refdata cpufreq_cpu_notifier = {
2414 .notifier_call = cpufreq_cpu_callback,
2415};
2416
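The block is registered against the hotplug notifier chain during driver registration (outside this hunk); the usual pattern, matching the teardown visible in the unregister path below:

	/* at cpufreq_register_driver() time */
	register_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* and on cpufreq_unregister_driver() */
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);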

--- 164 unchanged lines hidden ---

2581{
2582 unsigned long flags;
2583
2584 if (!cpufreq_driver || (driver != cpufreq_driver))
2585 return -EINVAL;
2586
2587 pr_debug("unregistering driver %s\n", driver->name);
2588
2548 /* Protect against concurrent cpu hotplug */
2549 get_online_cpus();
2589 subsys_interface_unregister(&cpufreq_interface);
2590 if (cpufreq_boost_supported())
2591 cpufreq_sysfs_remove_file(&boost.attr);
2592
2593 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2594
2595 down_write(&cpufreq_rwsem);
2596 write_lock_irqsave(&cpufreq_driver_lock, flags);
2597
2598 cpufreq_driver = NULL;
2599
2600 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2601 up_write(&cpufreq_rwsem);
2561 put_online_cpus();
2602
2603 return 0;
2604}
2605EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2606
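Taken together with the earlier hunks, this diff swaps the module-scope cpufreq_rwsem for the CPU-hotplug lock: readers now ride the hotplug path itself, and the unregister side brackets teardown with get/put_online_cpus(). A minimal fragment of the new exclusion pattern, with variables as in the function above:

	get_online_cpus();	/* no CPU may come or go... */

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;	/* ...so clearing the driver cannot race
				 * the hotplug-driven callbacks */
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	put_online_cpus();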
2607/*
2608 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2609 * or mutexes when secondary CPUs are halted.

--- 18 unchanged lines hidden ---