--- cpufreq.c	(acf403ecc4155153e5e2c1640be90fc166e56ba7)
+++ cpufreq.c	(7c45cf31b3ab9be270a7bf6af2926631dc566436)
@@ -1,8 +1,8 @@
 /*
  * linux/drivers/cpufreq/cpufreq.c
  *
  * Copyright (C) 2001 Russell King
  * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
  *
  * Oct 2005 - Ashok Raj <ashok.raj@intel.com>

--- 521 unchanged lines hidden ---

@@ -530,29 +530,31 @@
 
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  */
 #define store_one(file_name, object)                                    \
 static ssize_t store_##file_name                                        \
 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
 {                                                                       \
-        int ret;                                                        \
+        int ret, temp;                                                  \
         struct cpufreq_policy new_policy;                               \
                                                                         \
         ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
         if (ret)                                                        \
                 return -EINVAL;                                         \
                                                                         \
         ret = sscanf(buf, "%u", &new_policy.object);                    \
         if (ret != 1)                                                   \
                 return -EINVAL;                                         \
                                                                         \
+        temp = new_policy.object;                                       \
         ret = cpufreq_set_policy(policy, &new_policy);                  \
-        policy->user_policy.object = policy->object;                    \
+        if (!ret)                                                       \
+                policy->user_policy.object = temp;                      \
                                                                         \
         return ret ? ret : count;                                       \
 }
 
 store_one(scaling_min_freq, min);
 store_one(scaling_max_freq, max);
 
 /**
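Note: the store_one() change above makes the generated sysfs store handlers remember the value that was written and copy it into user_policy only when cpufreq_set_policy() succeeds, instead of unconditionally mirroring the (possibly clamped) policy field. For illustration only, store_one(scaling_max_freq, max) now expands to roughly the following; this function is produced by the preprocessor and is not spelled out in the file:

static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        int ret, temp;
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return -EINVAL;

        ret = sscanf(buf, "%u", &new_policy.max);
        if (ret != 1)
                return -EINVAL;

        temp = new_policy.max;                  /* requested value */
        ret = cpufreq_set_policy(policy, &new_policy);
        if (!ret)
                policy->user_policy.max = temp; /* commit only on success */

        return ret ? ret : count;
}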

--- 334 unchanged lines hidden ---

@@ -893,56 +895,41 @@
 }
 
 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
 {
        struct freq_attr **drv_attr;
        int ret = 0;
 
-        /* prepare interface data */
-        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-                                   &dev->kobj, "cpufreq");
-        if (ret)
-                return ret;
-
         /* set up files for this cpu device */
         drv_attr = cpufreq_driver->attr;
         while ((drv_attr) && (*drv_attr)) {
                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                 if (ret)
-                        goto err_out_kobj_put;
+                        return ret;
                 drv_attr++;
         }
         if (cpufreq_driver->get) {
                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                 if (ret)
-                        goto err_out_kobj_put;
+                        return ret;
         }
 
         ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
         if (ret)
-                goto err_out_kobj_put;
+                return ret;
 
         if (cpufreq_driver->bios_limit) {
                 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                 if (ret)
-                        goto err_out_kobj_put;
+                        return ret;
         }
 
-        ret = cpufreq_add_dev_symlink(policy);
-        if (ret)
-                goto err_out_kobj_put;
-
-        return ret;
-
-err_out_kobj_put:
-        kobject_put(&policy->kobj);
-        wait_for_completion(&policy->kobj_unregister);
-        return ret;
+        return cpufreq_add_dev_symlink(policy);
 }
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
 {
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;
 
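Note: cpufreq_add_dev_interface() no longer creates the policy kobject and no longer needs the err_out_kobj_put unwinding; the kobject is now initialized by the caller, as the later hunks in this diff show, so a failing sysfs_create_file() simply returns the error and leaves cleanup to the caller. As a reminder of what the drv_attr loop consumes, a hypothetical driver-side attribute array could look like the sketch below (illustrative only, not part of this diff; example_boost and example_driver_attrs are made-up names):

#include <linux/cpufreq.h>

static ssize_t show_example_boost(struct cpufreq_policy *policy, char *buf)
{
        /* made-up value; a real driver would report its own state */
        return sprintf(buf, "%u\n", policy->max);
}
cpufreq_freq_attr_ro(example_boost);

/* NULL-terminated array wired into cpufreq_driver->attr; the loop in
 * cpufreq_add_dev_interface() creates one sysfs file per entry. */
static struct freq_attr *example_driver_attrs[] = {
        &example_boost,
        NULL,
};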

--- 242 unchanged lines hidden ---

@@ -1191,31 +1178,41 @@
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }
 
+        down_write(&policy->rwsem);
+
        /* related cpus should atleast have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
        /*
         * affected cpus must always be the one, which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
        if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
+
+                /* prepare interface data */
+                ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+                                           &dev->kobj, "cpufreq");
+                if (ret) {
+                        pr_err("%s: failed to init policy->kobj: %d\n",
+                               __func__, ret);
+                        goto err_init_policy_kobj;
+                }
        }
 
-        down_write(&policy->rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {

--- 61 unchanged lines hidden ---
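Note: two things change in the policy-add path here. policy->rwsem is now taken right after the driver's ->init() call, and kobject_init_and_add() moves into this function, done only for a freshly created policy (!recover_policy); its failure jumps to the new err_init_policy_kobj label defined in the next hunk, which skips the kobject_put()/wait_for_completion() pair used by the later error paths. That pairing works because the policy ktype's release callback completes policy->kobj_unregister; a simplified sketch of that relationship, assumed from the rest of cpufreq.c and not shown in this diff:

/* Simplified sketch: when the last reference to policy->kobj is dropped by
 * kobject_put(), the ktype's ->release() runs and completes kobj_unregister,
 * which is what wait_for_completion() in the error path waits for. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy =
                container_of(kobj, struct cpufreq_policy, kobj);

        complete(&policy->kobj_unregister);
}

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,           /* assumed, defined elsewhere in this file */
        .release        = cpufreq_sysfs_release,
};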

@@ -1283,29 +1280,39 @@
 
        if (!recover_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
        up_write(&policy->rwsem);
 
        kobject_uevent(&policy->kobj, KOBJ_ADD);
+
        up_read(&cpufreq_rwsem);
 
+        /* Callback for handling stuff after policy is ready */
+        if (cpufreq_driver->ready)
+                cpufreq_driver->ready(policy);
+
        pr_debug("initialization complete\n");
 
        return 0;
 
 err_out_unregister:
 err_get_freq:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+        if (!recover_policy) {
+                kobject_put(&policy->kobj);
+                wait_for_completion(&policy->kobj_unregister);
+        }
+err_init_policy_kobj:
        up_write(&policy->rwsem);
 
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
        if (recover_policy) {
                /* Do not leave stale fallback data behind. */
                per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
--- 1245 unchanged lines hidden ---