/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cputime.h>

static spinlock_t cpufreq_stats_lock;

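/*
 * Per-CPU bookkeeping for cpufreq statistics:
 * @cpu:		CPU this table belongs to
 * @total_trans:	total number of frequency transitions counted
 * @last_time:		jiffies64 timestamp of the last accounting update
 * @max_state:		number of entries allocated in the tables below
 * @state_num:		number of distinct frequencies actually recorded
 * @last_index:		index of the frequency the CPU currently runs at
 * @time_in_state:	per-frequency residency, accumulated in jiffies
 * @freq_table:		frequencies (kHz), indexed like time_in_state
 * @trans_table:	max_state x max_state matrix of transition counts
 */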
struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};

static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_stats *, char *);
};

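/*
 * Charge the jiffies elapsed since the last update to the state the CPU is
 * currently running in (stat->last_index), then reset the timestamp.
 * Called on every frequency transition and whenever the statistics are
 * read from sysfs.
 */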
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	return sprintf(buf, "%u\n", stat->total_trans);
}

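/*
 * time_in_state: one "<frequency in kHz> <time>" pair per line, with the
 * time converted from jiffies by jiffies_64_to_clock_t() (USER_HZ units).
 * Illustrative output (values made up):
 *
 *	3401000 4651
 *	3400000 103
 *	2800000 9378
 */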
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			jiffies_64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
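/*
 * trans_table: a matrix of transition counts; row i, column j is the number
 * of switches from freq_table[i] to freq_table[j].  Illustrative layout
 * (counts made up):
 *
 *	   From  :    To
 *	         :   3401000   3400000   2800000
 *	  3401000:         0        12         7
 *	  3400000:        10         0         3
 *	  2800000:         9         4         0
 *
 * The output is clamped to a single page (PAGE_SIZE).
 */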
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

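/*
 * These attributes are grouped under "stats" on the policy's kobject, so
 * they typically appear as /sys/devices/system/cpu/cpuN/cpufreq/stats/.
 */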
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};

static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

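/* Return the index of @freq in stat->freq_table, or -1 if it is not there. */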
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;
	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}

static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return;

	pr_debug("%s: Free stat table\n", __func__);

	sysfs_remove_group(&policy->kobj, &stats_attr_group);
	kfree(stat->time_in_state);
	kfree(stat);
	per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
}

static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_frequency_get_table(policy->cpu))
		__cpufreq_stats_free_table(policy);

	cpufreq_cpu_put(policy);
}

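/*
 * Allocate and populate the stats table for @policy and add the "stats"
 * attribute group to its sysfs directory.  time_in_state, freq_table and
 * (if configured) trans_table are carved out of a single allocation.
 */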
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0;
	int ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *current_policy;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;

	current_policy = cpufreq_cpu_get(cpu);
	if (!current_policy) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

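	/*
	 * One allocation, laid out as:
	 *   count u64 time_in_state entries, then
	 *   count unsigned int freq_table entries, then
	 *   count * count unsigned int trans_table entries
	 *   (the last only with CONFIG_CPU_FREQ_STAT_DETAILS).
	 */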
	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(current_policy);
	return 0;
error_out:
	cpufreq_cpu_put(current_policy);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}

static void cpufreq_stats_create_table(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;

	/*
	 * "likely(!policy)" because normally cpufreq_stats is registered
	 * before the cpufreq driver, in which case no policy exists yet.
	 */
	policy = cpufreq_cpu_get(cpu);
	if (likely(!policy))
		return;

	table = cpufreq_frequency_get_table(policy->cpu);
	if (likely(table))
		__cpufreq_stats_create_table(policy, table);

	cpufreq_cpu_put(policy);
}

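/*
 * The CPU that manages a policy can change; move the stats table pointer
 * from policy->last_cpu to the new policy->cpu so the accumulated data is
 * preserved.
 */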
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
			policy->last_cpu);

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
}

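/*
 * Policy notifier: create the stats table when a policy is created, free it
 * when the policy is removed, and re-home the per-CPU pointer when the
 * policy's managing CPU changes.
 */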
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;

	if (val == CPUFREQ_CREATE_POLICY)
		ret = __cpufreq_stats_create_table(policy, table);
	else if (val == CPUFREQ_REMOVE_POLICY)
		__cpufreq_stats_free_table(policy);

	return ret;
}

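/*
 * Transition notifier: on CPUFREQ_POSTCHANGE, charge the elapsed time to
 * the frequency being left, then record the switch in trans_table and bump
 * total_trans.  Transitions to the same frequency are not counted.
 */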
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1] = ... */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};

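/*
 * Register the policy notifier, create tables for any CPUs that already
 * have a policy, then register the transition notifier.  If the second
 * registration fails, everything done so far is unwound.
 */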
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	for_each_online_cpu(cpu)
		cpufreq_stats_create_table(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}

static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	for_each_online_cpu(cpu)
		cpufreq_stats_free_table(cpu);
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
				"through the sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);