xref: /openbmc/linux/tools/power/x86/turbostat/turbostat.c (revision 650a37f32d2bc16fa802075be579802bc4ec4132)
1 /*
2  * turbostat -- show CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors.
4  *
5  * Copyright (c) 2012 Intel Corporation.
6  * Len Brown <len.brown@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 #define _GNU_SOURCE
23 #include <stdio.h>
24 #include <unistd.h>
25 #include <sys/types.h>
26 #include <sys/wait.h>
27 #include <sys/stat.h>
28 #include <sys/resource.h>
29 #include <fcntl.h>
30 #include <signal.h>
31 #include <sys/time.h>
32 #include <stdlib.h>
33 #include <dirent.h>
34 #include <string.h>
35 #include <ctype.h>
36 #include <sched.h>
37 
38 #define MSR_TSC	0x10
39 #define MSR_NEHALEM_PLATFORM_INFO	0xCE
40 #define MSR_NEHALEM_TURBO_RATIO_LIMIT	0x1AD
41 #define MSR_APERF	0xE8
42 #define MSR_MPERF	0xE7
43 #define MSR_PKG_C2_RESIDENCY	0x60D	/* SNB only */
44 #define MSR_PKG_C3_RESIDENCY	0x3F8
45 #define MSR_PKG_C6_RESIDENCY	0x3F9
46 #define MSR_PKG_C7_RESIDENCY	0x3FA	/* SNB only */
47 #define MSR_CORE_C3_RESIDENCY	0x3FC
48 #define MSR_CORE_C6_RESIDENCY	0x3FD
49 #define MSR_CORE_C7_RESIDENCY	0x3FE	/* SNB only */
50 
51 char *proc_stat = "/proc/stat";
52 unsigned int interval_sec = 5;	/* set with -i interval_sec */
53 unsigned int verbose;		/* set with -v */
54 unsigned int summary_only;	/* set with -s */
55 unsigned int skip_c0;
56 unsigned int skip_c1;
57 unsigned int do_nhm_cstates;
58 unsigned int do_snb_cstates;
59 unsigned int has_aperf;
60 unsigned int units = 1000000000;	/* GHz etc */
61 unsigned int genuine_intel;
62 unsigned int has_invariant_tsc;
63 unsigned int do_nehalem_platform_info;
64 unsigned int do_nehalem_turbo_ratio_limit;
65 unsigned int extra_msr_offset;
66 double bclk;
67 unsigned int show_pkg;
68 unsigned int show_core;
69 unsigned int show_cpu;
70 
71 int aperf_mperf_unstable;
72 int backwards_count;
73 char *progname;
74 
75 int num_cpus;
76 cpu_set_t *cpu_present_set, *cpu_mask;
77 size_t cpu_present_setsize, cpu_mask_size;
78 
79 struct counters {
80 	unsigned long long tsc;		/* per thread */
81 	unsigned long long aperf;	/* per thread */
82 	unsigned long long mperf;	/* per thread */
83 	unsigned long long c1;	/* per thread (calculated) */
84 	unsigned long long c3;	/* per core */
85 	unsigned long long c6;	/* per core */
86 	unsigned long long c7;	/* per core */
87 	unsigned long long pc2;	/* per package */
88 	unsigned long long pc3;	/* per package */
89 	unsigned long long pc6;	/* per package */
90 	unsigned long long pc7;	/* per package */
91 	unsigned long long extra_msr;	/* per thread */
92 	int pkg;
93 	int core;
94 	int cpu;
95 	struct counters *next;
96 };
97 
98 struct counters *cnt_even;
99 struct counters *cnt_odd;
100 struct counters *cnt_delta;
101 struct counters *cnt_average;
102 struct timeval tv_even;
103 struct timeval tv_odd;
104 struct timeval tv_delta;
105 
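/*
 * mark_cpu_present(pkg, core, cpu)
 *
 * for_all_cpus() callback: record this cpu in cpu_present_set
 */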
106 void mark_cpu_present(int pkg, int core, int cpu)
107 {
108 	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
110 }
111 
int for_all_cpus(void (func)(int, int, int));	/* defined below; used by cpu_mask_init() */

112 /*
113  * cpu_mask_init(ncpus)
114  *
115  * allocate and clear cpu_mask and cpu_present_set,
116  * set cpu_mask_size and cpu_present_setsize
117  */
118 void cpu_mask_init(int ncpus)
119 {
120 	cpu_mask = CPU_ALLOC(ncpus);
121 	if (cpu_mask == NULL) {
122 		perror("CPU_ALLOC");
123 		exit(3);
124 	}
125 	cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
126 	CPU_ZERO_S(cpu_mask_size, cpu_mask);
127 
128 	/*
129 	 * Allocate and initialize cpu_present_set
130 	 */
131 	cpu_present_set = CPU_ALLOC(ncpus);
132 	if (cpu_present_set == NULL) {
133 		perror("CPU_ALLOC");
134 		exit(3);
135 	}
136 	cpu_present_setsize = CPU_ALLOC_SIZE(ncpus);
137 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
138 	for_all_cpus(mark_cpu_present);
139 }
140 
141 void cpu_mask_uninit()
142 {
143 	CPU_FREE(cpu_mask);
144 	cpu_mask = NULL;
145 	cpu_mask_size = 0;
146 	CPU_FREE(cpu_present_set);
147 	cpu_present_set = NULL;
148 	cpu_present_setsize = 0;
149 }
150 
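/*
 * cpu_migrate(cpu)
 *
 * bind the calling thread to the given cpu before its counters are
 * collected; returns 0 on success, -1 if the cpu cannot be scheduled
 * (e.g. it went off-line)
 */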
151 int cpu_migrate(int cpu)
152 {
153 	CPU_ZERO_S(cpu_mask_size, cpu_mask);
154 	CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
155 	if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
156 		return -1;
157 	else
158 		return 0;
159 }
160 
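/*
 * get_msr(cpu, offset, *msr)
 *
 * read the 64-bit MSR at "offset" on "cpu" via the msr driver's
 * /dev/cpu/N/msr interface; returns 0 on success, -1 on failure
 */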
161 int get_msr(int cpu, off_t offset, unsigned long long *msr)
162 {
163 	ssize_t retval;
164 	char pathname[32];
165 	int fd;
166 
167 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
168 	fd = open(pathname, O_RDONLY);
169 	if (fd < 0)
170 		return -1;
171 
172 	retval = pread(fd, msr, sizeof *msr, offset);
173 	close(fd);
174 
175 	if (retval != sizeof *msr)
176 		return -1;
177 
178 	return 0;
179 }
180 
181 void print_header(void)
182 {
183 	if (show_pkg)
184 		fprintf(stderr, "pk");
185 	if (show_pkg)
186 		fprintf(stderr, " ");
187 	if (show_core)
188 		fprintf(stderr, "cor");
189 	if (show_cpu)
190 		fprintf(stderr, " CPU");
191 	if (show_pkg || show_core || show_cpu)
192 		fprintf(stderr, " ");
193 	if (do_nhm_cstates)
194 		fprintf(stderr, "   %%c0");
195 	if (has_aperf)
196 		fprintf(stderr, "  GHz");
197 	fprintf(stderr, "  TSC");
198 	if (do_nhm_cstates)
199 		fprintf(stderr, "    %%c1");
200 	if (do_nhm_cstates)
201 		fprintf(stderr, "    %%c3");
202 	if (do_nhm_cstates)
203 		fprintf(stderr, "    %%c6");
204 	if (do_snb_cstates)
205 		fprintf(stderr, "    %%c7");
206 	if (do_snb_cstates)
207 		fprintf(stderr, "   %%pc2");
208 	if (do_nhm_cstates)
209 		fprintf(stderr, "   %%pc3");
210 	if (do_nhm_cstates)
211 		fprintf(stderr, "   %%pc6");
212 	if (do_snb_cstates)
213 		fprintf(stderr, "   %%pc7");
214 	if (extra_msr_offset)
215 		fprintf(stderr, "        MSR 0x%x ", extra_msr_offset);
216 
217 	putc('\n', stderr);
218 }
219 
220 void dump_cnt(struct counters *cnt)
221 {
222 	if (!cnt)
223 		return;
224 	if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
225 	if (cnt->core) fprintf(stderr, "core: %d ", cnt->core);
226 	if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
227 	if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
228 	if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
229 	if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
230 	if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
231 	if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
232 	if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
233 	if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
234 	if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
235 	if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
236 	if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
237 }
238 
239 void dump_list(struct counters *cnt)
240 {
241 	printf("dump_list %p\n", cnt);
242 
243 	for (; cnt; cnt = cnt->next)
244 		dump_cnt(cnt);
245 }
246 
247 /*
248  * column formatting convention & formats
249  * package: "pk" 2 columns %2d
250  * core: "cor" 3 columns %3d
251  * CPU: "CPU" 3 columns %3d
252  * GHz: "GHz" 3 columns %3.2f
253  * TSC: "TSC" 5 columns %5.2f
254  * percentage " %pc3" %6.2f
255  */
256 void print_cnt(struct counters *p)
257 {
258 	double interval_float;
259 
260 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
261 
262 	/* topology columns, print blanks on 1st (average) line */
263 	if (p == cnt_average) {
264 		if (show_pkg)
265 			fprintf(stderr, "  ");
266 		if (show_pkg && show_core)
267 			fprintf(stderr, " ");
268 		if (show_core)
269 			fprintf(stderr, "   ");
270 		if (show_cpu)
271 			fprintf(stderr, " " "   ");
272 	} else {
273 		if (show_pkg)
274 			fprintf(stderr, "%2d", p->pkg);
275 		if (show_pkg && show_core)
276 			fprintf(stderr, " ");
277 		if (show_core)
278 			fprintf(stderr, "%3d", p->core);
279 		if (show_cpu)
280 			fprintf(stderr, " %3d", p->cpu);
281 	}
282 
283 	/* %c0 */
284 	if (do_nhm_cstates) {
285 		if (show_pkg || show_core || show_cpu)
286 			fprintf(stderr, " ");
287 		if (!skip_c0)
288 			fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
289 		else
290 			fprintf(stderr, "  ****");
291 	}
292 
293 	/* GHz */
294 	if (has_aperf) {
295 		if (!aperf_mperf_unstable) {
296 			fprintf(stderr, " %3.2f",
297 				1.0 * p->tsc / units * p->aperf /
298 				p->mperf / interval_float);
299 		} else {
300 			if (p->aperf > p->tsc || p->mperf > p->tsc) {
301 				fprintf(stderr, " ***");
302 			} else {
303 				fprintf(stderr, "%3.1f*",
304 					1.0 * p->tsc /
305 					units * p->aperf /
306 					p->mperf / interval_float);
307 			}
308 		}
309 	}
310 
311 	/* TSC */
312 	fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
313 
314 	if (do_nhm_cstates) {
315 		if (!skip_c1)
316 			fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
317 		else
318 			fprintf(stderr, "  ****");
319 	}
320 	if (do_nhm_cstates)
321 		fprintf(stderr, " %6.2f", 100.0 * p->c3/p->tsc);
322 	if (do_nhm_cstates)
323 		fprintf(stderr, " %6.2f", 100.0 * p->c6/p->tsc);
324 	if (do_snb_cstates)
325 		fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
326 	if (do_snb_cstates)
327 		fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
328 	if (do_nhm_cstates)
329 		fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
330 	if (do_nhm_cstates)
331 		fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
332 	if (do_snb_cstates)
333 		fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
334 	if (extra_msr_offset)
335 		fprintf(stderr, "  0x%016llx", p->extra_msr);
336 	putc('\n', stderr);
337 }
338 
339 void print_counters(struct counters *counters)
340 {
341 	struct counters *cnt;
342 	static int printed;
343 
344 
345 	if (!printed || !summary_only)
346 		print_header();
347 
348 	if (num_cpus > 1)
349 		print_cnt(cnt_average);
350 
351 	printed = 1;
352 
353 	if (summary_only)
354 		return;
355 
356 	for (cnt = counters; cnt != NULL; cnt = cnt->next)
357 		print_cnt(cnt);
358 
359 }
360 
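/*
 * SUBTRACT_COUNTER(after, before, delta)
 *
 * store (after - before) into delta; evaluates to non-zero
 * if the counter appears to have gone backwards
 */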
361 #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
362 
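/*
 * compute_delta(after, before, delta)
 *
 * walk the after/before/delta lists in lock-step, subtracting each
 * counter and complaining when one goes backwards; c1 residency has
 * no hardware counter and is derived from TSC - mperf - c3 - c6 - c7
 */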
363 int compute_delta(struct counters *after,
364 	struct counters *before, struct counters *delta)
365 {
366 	int errors = 0;
367 	int perf_err = 0;
368 
369 	skip_c0 = skip_c1 = 0;
370 
371 	for ( ; after && before && delta;
372 		after = after->next, before = before->next, delta = delta->next) {
373 		if (before->cpu != after->cpu) {
374 			printf("cpu configuration changed: %d != %d\n",
375 				before->cpu, after->cpu);
376 			return -1;
377 		}
378 
379 		if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
380 			fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
381 				before->cpu, before->tsc, after->tsc);
382 			errors++;
383 		}
384 		/* check for TSC < 1 Mcycles over interval */
385 		if (delta->tsc < (1000 * 1000)) {
386 			fprintf(stderr, "Insanely slow TSC rate,"
387 				" TSC stops in idle?\n");
388 			fprintf(stderr, "You can disable all c-states"
389 				" by booting with \"idle=poll\"\n");
390 			fprintf(stderr, "or just the deep ones with"
391 				" \"processor.max_cstate=1\"\n");
392 			exit(-3);
393 		}
394 		if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
395 			fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
396 				before->cpu, before->c3, after->c3);
397 			errors++;
398 		}
399 		if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
400 			fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
401 				before->cpu, before->c6, after->c6);
402 			errors++;
403 		}
404 		if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
405 			fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
406 				before->cpu, before->c7, after->c7);
407 			errors++;
408 		}
409 		if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
410 			fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
411 				before->cpu, before->pc2, after->pc2);
412 			errors++;
413 		}
414 		if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
415 			fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
416 				before->cpu, before->pc3, after->pc3);
417 			errors++;
418 		}
419 		if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
420 			fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
421 				before->cpu, before->pc6, after->pc6);
422 			errors++;
423 		}
424 		if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
425 			fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
426 				before->cpu, before->pc7, after->pc7);
427 			errors++;
428 		}
429 
430 		perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
431 		if (perf_err) {
432 			fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
433 				before->cpu, before->aperf, after->aperf);
434 		}
435 		perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
436 		if (perf_err) {
437 			fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
438 				before->cpu, before->mperf, after->mperf);
439 		}
440 		if (perf_err) {
441 			if (!aperf_mperf_unstable) {
442 				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
443 				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
444 				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
445 
446 				aperf_mperf_unstable = 1;
447 			}
448 			/*
449 			 * the mperf delta is likely a huge bogus "positive" number;
450 			 * it cannot be used to calculate c0 or c1 time
451 			 */
452 			skip_c0 = 1;
453 			skip_c1 = 1;
454 		}
455 
456 		/*
457 		 * As mperf and tsc collection are not atomic,
458 		 * it is possible for mperf's non-halted cycles
459 		 * to exceed TSC's all cycles: show c1 = 0% in that case.
460 		 */
461 		if (delta->mperf > delta->tsc)
462 			delta->c1 = 0;
463 		else /* normal case, derive c1 */
464 			delta->c1 = delta->tsc - delta->mperf
465 				- delta->c3 - delta->c6 - delta->c7;
466 
467 		if (delta->mperf == 0)
468 			delta->mperf = 1;	/* divide by 0 protection */
469 
470 		/*
471 		 * for "extra msr", just copy the latest w/o subtracting
472 		 */
473 		delta->extra_msr = after->extra_msr;
474 		if (errors) {
475 			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
476 			dump_cnt(before);
477 			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
478 			dump_cnt(after);
479 			errors = 0;
480 		}
481 	}
482 	return 0;
483 }
484 
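/*
 * compute_average(delta, avg)
 *
 * sum the per-cpu deltas and divide by num_cpus
 * to produce the system-wide average row
 */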
485 void compute_average(struct counters *delta, struct counters *avg)
486 {
487 	struct counters *sum;
488 
489 	sum = calloc(1, sizeof(struct counters));
490 	if (sum == NULL) {
491 		perror("calloc sum");
492 		exit(1);
493 	}
494 
495 	for (; delta; delta = delta->next) {
496 		sum->tsc += delta->tsc;
497 		sum->c1 += delta->c1;
498 		sum->c3 += delta->c3;
499 		sum->c6 += delta->c6;
500 		sum->c7 += delta->c7;
501 		sum->aperf += delta->aperf;
502 		sum->mperf += delta->mperf;
503 		sum->pc2 += delta->pc2;
504 		sum->pc3 += delta->pc3;
505 		sum->pc6 += delta->pc6;
506 		sum->pc7 += delta->pc7;
507 	}
508 	avg->tsc = sum->tsc/num_cpus;
509 	avg->c1 = sum->c1/num_cpus;
510 	avg->c3 = sum->c3/num_cpus;
511 	avg->c6 = sum->c6/num_cpus;
512 	avg->c7 = sum->c7/num_cpus;
513 	avg->aperf = sum->aperf/num_cpus;
514 	avg->mperf = sum->mperf/num_cpus;
515 	avg->pc2 = sum->pc2/num_cpus;
516 	avg->pc3 = sum->pc3/num_cpus;
517 	avg->pc6 = sum->pc6/num_cpus;
518 	avg->pc7 = sum->pc7/num_cpus;
519 
520 	free(sum);
521 }
522 
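/*
 * get_counters(cnt)
 *
 * for each cpu in the list: migrate there, then snapshot its TSC,
 * APERF/MPERF and C-state residency MSRs; returns -1 if a cpu
 * disappears so the caller can re-initialize
 */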
523 int get_counters(struct counters *cnt)
524 {
525 	for ( ; cnt; cnt = cnt->next) {
526 
527 		if (cpu_migrate(cnt->cpu))
528 			return -1;
529 
530 		if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
531 			return -1;
532 
533 		if (has_aperf) {
534 			if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
535 				return -1;
536 			if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
537 				return -1;
538 		}
539 
540 		if (do_nhm_cstates) {
541 			if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
542 				return -1;
543 			if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
544 				return -1;
545 		}
546 
547 		if (do_snb_cstates)
548 			if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
549 				return -1;
550 
551 		if (do_nhm_cstates) {
552 			if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
553 				return -1;
554 			if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
555 				return -1;
556 		}
557 		if (do_snb_cstates) {
558 			if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
559 				return -1;
560 			if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
561 				return -1;
562 		}
563 		if (extra_msr_offset)
564 			if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
565 				return -1;
566 	}
567 	return 0;
568 }
569 
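/*
 * print_nehalem_info()
 *
 * decode MSR_NEHALEM_PLATFORM_INFO (bits 47:40 max efficiency ratio,
 * bits 15:8 base/TSC ratio) and MSR_NEHALEM_TURBO_RATIO_LIMIT
 * (one ratio byte per number of active cores), scaled by bclk
 */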
570 void print_nehalem_info(void)
571 {
572 	unsigned long long msr;
573 	unsigned int ratio;
574 
575 	if (!do_nehalem_platform_info)
576 		return;
577 
578 	get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);
579 
580 	ratio = (msr >> 40) & 0xFF;
581 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
582 		ratio, bclk, ratio * bclk);
583 
584 	ratio = (msr >> 8) & 0xFF;
585 	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
586 		ratio, bclk, ratio * bclk);
587 
588 	if (verbose > 1)
589 		fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
590 
591 	if (!do_nehalem_turbo_ratio_limit)
592 		return;
593 
594 	get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);
595 
596 	ratio = (msr >> 24) & 0xFF;
597 	if (ratio)
598 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
599 			ratio, bclk, ratio * bclk);
600 
601 	ratio = (msr >> 16) & 0xFF;
602 	if (ratio)
603 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
604 			ratio, bclk, ratio * bclk);
605 
606 	ratio = (msr >> 8) & 0xFF;
607 	if (ratio)
608 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
609 			ratio, bclk, ratio * bclk);
610 
611 	ratio = (msr >> 0) & 0xFF;
612 	if (ratio)
613 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active core\n",
614 			ratio, bclk, ratio * bclk);
615 
616 }
617 
618 void free_counter_list(struct counters *list)
619 {
620 	struct counters *p;
621 
622 	for (p = list; p; ) {
623 		struct counters *free_me;
624 
625 		free_me = p;
626 		p = p->next;
627 		free(free_me);
628 	}
629 }
630 
631 void free_all_counters(void)
632 {
633 	free_counter_list(cnt_even);
634 	cnt_even = NULL;
635 
636 	free_counter_list(cnt_odd);
637 	cnt_odd = NULL;
638 
639 	free_counter_list(cnt_delta);
640 	cnt_delta = NULL;
641 
642 	free_counter_list(cnt_average);
643 	cnt_average = NULL;
644 }
645 
646 void insert_counters(struct counters **list,
647 	struct counters *new)
648 {
649 	struct counters *prev;
650 
651 	/*
652 	 * list was empty
653 	 */
654 	if (*list == NULL) {
655 		new->next = *list;
656 		*list = new;
657 		return;
658 	}
659 
660 	if (!summary_only)
661 		show_cpu = 1;	/* there is more than one CPU */
662 
663 	/*
664 	 * insert on front of list.
665 	 * It is sorted by ascending package#, core#, cpu#
666 	 */
667 	if (((*list)->pkg > new->pkg) ||
668 	    (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
669 	    (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
670 		new->next = *list;
671 		*list = new;
672 		return;
673 	}
674 
675 	prev = *list;
676 
677 	while (prev->next && (prev->next->pkg < new->pkg)) {
678 		prev = prev->next;
679 		if (!summary_only)
680 			show_pkg = 1;	/* there is more than 1 package */
681 	}
682 
683 	while (prev->next && (prev->next->pkg == new->pkg)
684 		&& (prev->next->core < new->core)) {
685 		prev = prev->next;
686 		if (!summary_only)
687 			show_core = 1;	/* there is more than 1 core */
688 	}
689 
690 	while (prev->next && (prev->next->pkg == new->pkg)
691 		&& (prev->next->core == new->core)
692 		&& (prev->next->cpu < new->cpu)) {
693 		prev = prev->next;
694 	}
695 
696 	/*
697 	 * insert after "prev"
698 	 */
699 	new->next = prev->next;
700 	prev->next = new;
701 }
702 
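/*
 * alloc_new_counters(pkg, core, cpu)
 *
 * allocate a counters node for this cpu on each of the odd, even and
 * delta lists (kept sorted by package/core/cpu); cnt_average is a
 * single summary node
 */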
703 void alloc_new_counters(int pkg, int core, int cpu)
704 {
705 	struct counters *new;
706 
707 	if (verbose > 1)
708 		printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
709 
710 	new = (struct counters *)calloc(1, sizeof(struct counters));
711 	if (new == NULL) {
712 		perror("calloc");
713 		exit(1);
714 	}
715 	new->pkg = pkg;
716 	new->core = core;
717 	new->cpu = cpu;
718 	insert_counters(&cnt_odd, new);
719 
720 	new = (struct counters *)calloc(1,
721 		sizeof(struct counters));
722 	if (new == NULL) {
723 		perror("calloc");
724 		exit(1);
725 	}
726 	new->pkg = pkg;
727 	new->core = core;
728 	new->cpu = cpu;
729 	insert_counters(&cnt_even, new);
730 
731 	new = (struct counters *)calloc(1, sizeof(struct counters));
732 	if (new == NULL) {
733 		perror("calloc");
734 		exit(1);
735 	}
736 	new->pkg = pkg;
737 	new->core = core;
738 	new->cpu = cpu;
739 	insert_counters(&cnt_delta, new);
740 
741 	new = (struct counters *)calloc(1, sizeof(struct counters));
742 	if (new == NULL) {
743 		perror("calloc");
744 		exit(1);
745 	}
746 	new->pkg = pkg;
747 	new->core = core;
748 	new->cpu = cpu;
749 	cnt_average = new;
750 }
751 
752 int get_physical_package_id(int cpu)
753 {
754 	char path[64];
755 	FILE *filep;
756 	int pkg;
757 
758 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
759 	filep = fopen(path, "r");
760 	if (filep == NULL) {
761 		perror(path);
762 		exit(1);
763 	}
764 	fscanf(filep, "%d", &pkg);
765 	fclose(filep);
766 	return pkg;
767 }
768 
769 int get_core_id(int cpu)
770 {
771 	char path[64];
772 	FILE *filep;
773 	int core;
774 
775 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
776 	filep = fopen(path, "r");
777 	if (filep == NULL) {
778 		perror(path);
779 		exit(1);
780 	}
781 	fscanf(filep, "%d", &core);
782 	fclose(filep);
783 	return core;
784 }
785 
786 /*
787  * run func(pkg, core, cpu) on every cpu in /proc/stat
788  */
789 
790 int for_all_cpus(void (func)(int, int, int))
791 {
792 	FILE *fp;
793 	int cpu_count;
794 	int retval;
795 
796 	fp = fopen(proc_stat, "r");
797 	if (fp == NULL) {
798 		perror(proc_stat);
799 		exit(1);
800 	}
801 
802 	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
803 	if (retval != 0) {
804 		perror("/proc/stat format");
805 		exit(1);
806 	}
807 
808 	for (cpu_count = 0; ; cpu_count++) {
809 		int cpu;
810 
811 		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
812 		if (retval != 1)
813 			break;
814 
815 		func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
816 	}
817 	fclose(fp);
818 	return cpu_count;
819 }
820 
821 void re_initialize(void)
822 {
823 	free_all_counters();
824 	num_cpus = for_all_cpus(alloc_new_counters);
825 	cpu_mask_uninit();
826 	cpu_mask_init(num_cpus);
827 	printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
828 }
829 
830 void dummy(int pkg, int core, int cpu) { return; }
831 /*
832  * check whether the number of on-line cpus has changed
833  */
834 int verify_num_cpus(void)
835 {
836 	int new_num_cpus;
837 
838 	new_num_cpus = for_all_cpus(dummy);
839 
840 	if (new_num_cpus != num_cpus) {
841 		if (verbose)
842 			printf("num_cpus was %d, is now %d\n",
843 				num_cpus, new_num_cpus);
844 		return -1;
845 	}
846 	return 0;
847 }
848 
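/*
 * turbostat_loop()
 *
 * counters are snapshotted alternately into cnt_even and cnt_odd;
 * each interval the difference goes into cnt_delta, is averaged
 * into cnt_average, and printed
 */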
849 void turbostat_loop()
850 {
851 restart:
852 	get_counters(cnt_even);
853 	gettimeofday(&tv_even, (struct timezone *)NULL);
854 
855 	while (1) {
856 		if (verify_num_cpus()) {
857 			re_initialize();
858 			goto restart;
859 		}
860 		sleep(interval_sec);
861 		if (get_counters(cnt_odd)) {
862 			re_initialize();
863 			goto restart;
864 		}
865 		gettimeofday(&tv_odd, (struct timezone *)NULL);
866 		compute_delta(cnt_odd, cnt_even, cnt_delta);
867 		timersub(&tv_odd, &tv_even, &tv_delta);
868 		compute_average(cnt_delta, cnt_average);
869 		print_counters(cnt_delta);
870 		sleep(interval_sec);
871 		if (get_counters(cnt_even)) {
872 			re_initialize();
873 			goto restart;
874 		}
875 		gettimeofday(&tv_even, (struct timezone *)NULL);
876 		compute_delta(cnt_even, cnt_odd, cnt_delta);
877 		timersub(&tv_even, &tv_odd, &tv_delta);
878 		compute_average(cnt_delta, cnt_average);
879 		print_counters(cnt_delta);
880 	}
881 }
882 
883 void check_dev_msr()
884 {
885 	struct stat sb;
886 
887 	if (stat("/dev/cpu/0/msr", &sb)) {
888 		fprintf(stderr, "no /dev/cpu/0/msr\n");
889 		fprintf(stderr, "Try \"# modprobe msr\"\n");
890 		exit(-5);
891 	}
892 }
893 
894 void check_super_user()
895 {
896 	if (getuid() != 0) {
897 		fprintf(stderr, "must be root\n");
898 		exit(-6);
899 	}
900 }
901 
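/*
 * return 1 for models known to implement MSR_NEHALEM_TURBO_RATIO_LIMIT
 */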
902 int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
903 {
904 	if (!genuine_intel)
905 		return 0;
906 
907 	if (family != 6)
908 		return 0;
909 
910 	switch (model) {
911 	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
912 	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
913 	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
914 	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
915 	case 0x2C:	/* Westmere EP - Gulftown */
916 	case 0x2A:	/* SNB */
917 	case 0x2D:	/* SNB Xeon */
918 	case 0x3A:	/* IVB */
919 	case 0x3D:	/* IVB Xeon */
920 		return 1;
921 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
922 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
923 	default:
924 		return 0;
925 	}
926 }
927 
928 int is_snb(unsigned int family, unsigned int model)
929 {
930 	if (!genuine_intel)
931 		return 0;
932 
933 	switch (model) {
934 	case 0x2A:
935 	case 0x2D:
936 	case 0x3A:	/* IVB */
937 	case 0x3D:	/* IVB Xeon */
938 		return 1;
939 	}
940 	return 0;
941 }
942 
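/*
 * SNB-class parts (per is_snb()) use a 100 MHz bus clock;
 * everything else is assumed to use 133 MHz
 */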
943 double discover_bclk(unsigned int family, unsigned int model)
944 {
945 	if (is_snb(family, model))
946 		return 100.00;
947 	else
948 		return 133.33;
949 }
950 
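/*
 * check_cpuid()
 *
 * probe CPUID for vendor, family/model/stepping, MSR support,
 * invariant TSC (CPUID.80000007H:EDX[8]) and APERF/MPERF
 * (CPUID.06H:ECX[0]), then set the do_* feature flags
 */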
951 void check_cpuid()
952 {
953 	unsigned int eax, ebx, ecx, edx, max_level;
954 	unsigned int fms, family, model, stepping;
955 
956 	eax = ebx = ecx = edx = 0;
957 
958 	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
959 
960 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
961 		genuine_intel = 1;
962 
963 	if (verbose)
964 		fprintf(stderr, "%.4s%.4s%.4s ",
965 			(char *)&ebx, (char *)&edx, (char *)&ecx);
966 
967 	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
968 	family = (fms >> 8) & 0xf;
969 	model = (fms >> 4) & 0xf;
970 	stepping = fms & 0xf;
971 	if (family == 6 || family == 0xf)
972 		model += ((fms >> 16) & 0xf) << 4;
973 
974 	if (verbose)
975 		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
976 			max_level, family, model, stepping, family, model, stepping);
977 
978 	if (!(edx & (1 << 5))) {
979 		fprintf(stderr, "CPUID: no MSR\n");
980 		exit(1);
981 	}
982 
983 	/*
984 	 * check max extended function levels of CPUID.
985 	 * This is needed to check for invariant TSC.
986 	 * This check is valid for both Intel and AMD.
987 	 */
988 	ebx = ecx = edx = 0;
989 	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
990 
991 	if (max_level < 0x80000007) {
992 		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
993 		exit(1);
994 	}
995 
996 	/*
997 	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
998 	 * this check is valid for both Intel and AMD
999 	 */
1000 	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
1001 	has_invariant_tsc = edx & (1 << 8);
1002 
1003 	if (!has_invariant_tsc) {
1004 		fprintf(stderr, "No invariant TSC\n");
1005 		exit(1);
1006 	}
1007 
1008 	/*
1009 	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
1010 	 * this check is valid for both Intel and AMD
1011 	 */
1012 
1013 	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
1014 	has_aperf = ecx & (1 << 0);
1015 	if (!has_aperf) {
1016 		fprintf(stderr, "No APERF MSR\n");
1017 		exit(1);
1018 	}
1019 
1020 	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
1021 	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
1022 	do_snb_cstates = is_snb(family, model);
1023 	bclk = discover_bclk(family, model);
1024 
1025 	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
1026 }
1027 
1028 
1029 void usage()
1030 {
1031 	fprintf(stderr, "%s: [-s] [-v] [-M MSR#] [-i interval_sec | command ...]\n",
1032 		progname);
1033 	exit(1);
1034 }
1035 
1036 
1037 /*
1038  * in /dev/cpu/, return success for names that are numbers,
1039  * i.e. filter out ".", "..", "microcode".
1040  */
1041 int dir_filter(const struct dirent *dirp)
1042 {
1043 	if (isdigit(dirp->d_name[0]))
1044 		return 1;
1045 	else
1046 		return 0;
1047 }
1048 
1049 int open_dev_cpu_msr(int dummy1)
1050 {
1051 	return 0;
1052 }
1053 
1054 void turbostat_init()
1055 {
1056 	check_cpuid();
1057 
1058 	check_dev_msr();
1059 	check_super_user();
1060 
1061 	num_cpus = for_all_cpus(alloc_new_counters);
1062 	cpu_mask_init(num_cpus);
1063 
1064 	if (verbose)
1065 		print_nehalem_info();
1066 }
1067 
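/*
 * fork_it(argv)
 *
 * snapshot counters, fork/exec the given command, snapshot again
 * when it exits, and print the deltas for the command's run time
 */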
1068 int fork_it(char **argv)
1069 {
1070 	int retval;
1071 	pid_t child_pid;
1072 	get_counters(cnt_even);
1073 
1074 	/* clear affinity side-effect of get_counters() */
1075 	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
1076 	gettimeofday(&tv_even, (struct timezone *)NULL);
1077 
1078 	child_pid = fork();
1079 	if (!child_pid) {
1080 		/* child */
1081 		execvp(argv[0], argv);
		/* reached only if exec fails */
		perror(argv[0]);
		exit(1);
1082 	} else {
1083 		int status;
1084 
1085 		/* parent */
1086 		if (child_pid == -1) {
1087 			perror("fork");
1088 			exit(1);
1089 		}
1090 
1091 		signal(SIGINT, SIG_IGN);
1092 		signal(SIGQUIT, SIG_IGN);
1093 		if (waitpid(child_pid, &status, 0) == -1) {
1094 			perror("wait");
1095 			exit(1);
1096 		}
1097 	}
1098 	get_counters(cnt_odd);
1099 	gettimeofday(&tv_odd, (struct timezone *)NULL);
1100 	retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
1101 
1102 	timersub(&tv_odd, &tv_even, &tv_delta);
1103 	compute_average(cnt_delta, cnt_average);
1104 	if (!retval)
1105 		print_counters(cnt_delta);
1106 
1107 	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
1108 
1109 	return 0;
1110 }
1111 
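/*
 * cmdline(argc, argv)
 *
 * parse options; the leading "+" in the getopt string stops at the
 * first non-option, so anything left over is a command to fork
 */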
1112 void cmdline(int argc, char **argv)
1113 {
1114 	int opt;
1115 
1116 	progname = argv[0];
1117 
1118 	while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
1119 		switch (opt) {
1120 		case 's':
1121 			summary_only++;
1122 			break;
1123 		case 'v':
1124 			verbose++;
1125 			break;
1126 		case 'i':
1127 			interval_sec = atoi(optarg);
1128 			break;
1129 		case 'M':
1130 			sscanf(optarg, "%x", &extra_msr_offset);
1131 			if (verbose > 1)
1132 				fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
1133 			break;
1134 		default:
1135 			usage();
1136 		}
1137 	}
1138 }
1139 
1140 int main(int argc, char **argv)
1141 {
1142 	cmdline(argc, argv);
1143 
1144 	if (verbose > 1)
1145 		fprintf(stderr, "turbostat Dec 6, 2010"
1146 			" - Len Brown <lenb@kernel.org>\n");
1147 	if (verbose > 1)
1148 		fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
1149 
1150 	turbostat_init();
1151 
1152 	/*
1153 	 * if any params left, it must be a command to fork
1154 	 */
1155 	if (argc - optind)
1156 		return fork_it(argv + optind);
1157 	else
1158 		turbostat_loop();
1159 
1160 	return 0;
1161 }
1162