1 /*
2  * turbostat -- show CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors.
4  *
5  * Copyright (c) 2013 Intel Corporation.
6  * Len Brown <len.brown@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 #define _GNU_SOURCE
23 #include MSRHEADER
24 #include <stdarg.h>
25 #include <stdio.h>
26 #include <err.h>
27 #include <unistd.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <sys/stat.h>
31 #include <sys/resource.h>
32 #include <fcntl.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <stdlib.h>
36 #include <getopt.h>
37 #include <dirent.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <sched.h>
41 #include <cpuid.h>
42 #include <linux/capability.h>
43 #include <errno.h>
44 
45 char *proc_stat = "/proc/stat";
46 unsigned int interval_sec = 5;
47 unsigned int debug;
48 unsigned int rapl_joules;
49 unsigned int summary_only;
50 unsigned int dump_only;
51 unsigned int skip_c0;
52 unsigned int skip_c1;
53 unsigned int do_nhm_cstates;
54 unsigned int do_snb_cstates;
55 unsigned int do_pc2;
56 unsigned int do_pc3;
57 unsigned int do_pc6;
58 unsigned int do_pc7;
59 unsigned int do_c8_c9_c10;
60 unsigned int do_skl_residency;
61 unsigned int do_slm_cstates;
62 unsigned int use_c1_residency_msr;
63 unsigned int has_aperf;
64 unsigned int has_epb;
65 unsigned int units = 1000000;	/* MHz etc */
66 unsigned int genuine_intel;
67 unsigned int has_invariant_tsc;
68 unsigned int do_nhm_platform_info;
69 unsigned int extra_msr_offset32;
70 unsigned int extra_msr_offset64;
71 unsigned int extra_delta_offset32;
72 unsigned int extra_delta_offset64;
73 int do_smi;
74 double bclk;
75 unsigned int show_pkg;
76 unsigned int show_core;
77 unsigned int show_cpu;
78 unsigned int show_pkg_only;
79 unsigned int show_core_only;
80 char *output_buffer, *outp;
81 unsigned int do_rapl;
82 unsigned int do_dts;
83 unsigned int do_ptm;
84 unsigned int tcc_activation_temp;
85 unsigned int tcc_activation_temp_override;
86 double rapl_power_units, rapl_time_units;
87 double rapl_dram_energy_units, rapl_energy_units;
88 double rapl_joule_counter_range;
89 unsigned int do_core_perf_limit_reasons;
90 unsigned int do_gfx_perf_limit_reasons;
91 unsigned int do_ring_perf_limit_reasons;
92 unsigned int crystal_hz;
93 unsigned long long tsc_hz;
94 
95 #define RAPL_PKG		(1 << 0)
96 					/* 0x610 MSR_PKG_POWER_LIMIT */
97 					/* 0x611 MSR_PKG_ENERGY_STATUS */
98 #define RAPL_PKG_PERF_STATUS	(1 << 1)
99 					/* 0x613 MSR_PKG_PERF_STATUS */
100 #define RAPL_PKG_POWER_INFO	(1 << 2)
101 					/* 0x614 MSR_PKG_POWER_INFO */
102 
103 #define RAPL_DRAM		(1 << 3)
104 					/* 0x618 MSR_DRAM_POWER_LIMIT */
105 					/* 0x619 MSR_DRAM_ENERGY_STATUS */
106 #define RAPL_DRAM_PERF_STATUS	(1 << 4)
107 					/* 0x61b MSR_DRAM_PERF_STATUS */
108 #define RAPL_DRAM_POWER_INFO	(1 << 5)
109 					/* 0x61c MSR_DRAM_POWER_INFO */
110 
111 #define RAPL_CORES		(1 << 6)
112 					/* 0x638 MSR_PP0_POWER_LIMIT */
113 					/* 0x639 MSR_PP0_ENERGY_STATUS */
114 #define RAPL_CORE_POLICY	(1 << 7)
115 					/* 0x63a MSR_PP0_POLICY */
116 
117 #define RAPL_GFX		(1 << 8)
118 					/* 0x640 MSR_PP1_POWER_LIMIT */
119 					/* 0x641 MSR_PP1_ENERGY_STATUS */
120 					/* 0x642 MSR_PP1_POLICY */
121 #define	TJMAX_DEFAULT	100
122 
123 #define MAX(a, b) ((a) > (b) ? (a) : (b))
124 
125 int aperf_mperf_unstable;
126 int backwards_count;
127 char *progname;
128 
129 cpu_set_t *cpu_present_set, *cpu_affinity_set;
130 size_t cpu_present_setsize, cpu_affinity_setsize;
131 
132 struct thread_data {
133 	unsigned long long tsc;
134 	unsigned long long aperf;
135 	unsigned long long mperf;
136 	unsigned long long c1;
137 	unsigned long long extra_msr64;
138 	unsigned long long extra_delta64;
139 	unsigned long long extra_msr32;
140 	unsigned long long extra_delta32;
141 	unsigned int smi_count;
142 	unsigned int cpu_id;
143 	unsigned int flags;
144 #define CPU_IS_FIRST_THREAD_IN_CORE	0x2
145 #define CPU_IS_FIRST_CORE_IN_PACKAGE	0x4
146 } *thread_even, *thread_odd;
147 
148 struct core_data {
149 	unsigned long long c3;
150 	unsigned long long c6;
151 	unsigned long long c7;
152 	unsigned int core_temp_c;
153 	unsigned int core_id;
154 } *core_even, *core_odd;
155 
156 struct pkg_data {
157 	unsigned long long pc2;
158 	unsigned long long pc3;
159 	unsigned long long pc6;
160 	unsigned long long pc7;
161 	unsigned long long pc8;
162 	unsigned long long pc9;
163 	unsigned long long pc10;
164 	unsigned long long pkg_wtd_core_c0;
165 	unsigned long long pkg_any_core_c0;
166 	unsigned long long pkg_any_gfxe_c0;
167 	unsigned long long pkg_both_core_gfxe_c0;
168 	unsigned int package_id;
169 	unsigned int energy_pkg;	/* MSR_PKG_ENERGY_STATUS */
170 	unsigned int energy_dram;	/* MSR_DRAM_ENERGY_STATUS */
171 	unsigned int energy_cores;	/* MSR_PP0_ENERGY_STATUS */
172 	unsigned int energy_gfx;	/* MSR_PP1_ENERGY_STATUS */
173 	unsigned int rapl_pkg_perf_status;	/* MSR_PKG_PERF_STATUS */
174 	unsigned int rapl_dram_perf_status;	/* MSR_DRAM_PERF_STATUS */
175 	unsigned int pkg_temp_c;
176 
177 } *package_even, *package_odd;
178 
179 #define ODD_COUNTERS thread_odd, core_odd, package_odd
180 #define EVEN_COUNTERS thread_even, core_even, package_even
181 
182 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
183 	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
184 		topo.num_threads_per_core + \
185 		(core_no) * topo.num_threads_per_core + (thread_no))
186 #define GET_CORE(core_base, core_no, pkg_no) \
187 	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
188 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
189 
190 struct system_summary {
191 	struct thread_data threads;
192 	struct core_data cores;
193 	struct pkg_data packages;
194 } sum, average;
195 
196 
197 struct topo_params {
198 	int num_packages;
199 	int num_cpus;
200 	int num_cores;
201 	int max_cpu_num;
202 	int num_cores_per_pkg;
203 	int num_threads_per_core;
204 } topo;
205 
206 struct timeval tv_even, tv_odd, tv_delta;
207 
208 void setup_all_buffers(void);
209 
210 int cpu_is_not_present(int cpu)
211 {
212 	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
213 }
214 /*
215  * run func(thread, core, package) in topology order
216  * skip non-present cpus
217  */
218 
219 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
220 	struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
221 {
222 	int retval, pkg_no, core_no, thread_no;
223 
224 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
225 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
226 			for (thread_no = 0; thread_no <
227 				topo.num_threads_per_core; ++thread_no) {
228 				struct thread_data *t;
229 				struct core_data *c;
230 				struct pkg_data *p;
231 
232 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
233 
234 				if (cpu_is_not_present(t->cpu_id))
235 					continue;
236 
237 				c = GET_CORE(core_base, core_no, pkg_no);
238 				p = GET_PKG(pkg_base, pkg_no);
239 
240 				retval = func(t, c, p);
241 				if (retval)
242 					return retval;
243 			}
244 		}
245 	}
246 	return 0;
247 }
248 
249 int cpu_migrate(int cpu)
250 {
251 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
252 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
253 	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
254 		return -1;
255 	else
256 		return 0;
257 }
258 
259 int get_msr(int cpu, off_t offset, unsigned long long *msr)
260 {
261 	ssize_t retval;
262 	char pathname[32];
263 	int fd;
264 
265 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
266 	fd = open(pathname, O_RDONLY);
267 	if (fd < 0)
268 		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
269 
270 	retval = pread(fd, msr, sizeof *msr, offset);
271 	close(fd);
272 
273 	if (retval != sizeof *msr)
274 		err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
275 
276 	return 0;
277 }
278 
279 /*
280  * Example Format w/ field column widths:
281  *
282  *  Package    Core     CPU Avg_MHz Bzy_MHz TSC_MHz     SMI   %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
283  * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
284  */
285 
286 void print_header(void)
287 {
288 	if (show_pkg)
289 		outp += sprintf(outp, " Package");
290 	if (show_core)
291 		outp += sprintf(outp, "    Core");
292 	if (show_cpu)
293 		outp += sprintf(outp, "     CPU");
294 	if (has_aperf)
295 		outp += sprintf(outp, " Avg_MHz");
296 	if (has_aperf)
297 		outp += sprintf(outp, "   %%Busy");
298 	if (has_aperf)
299 		outp += sprintf(outp, " Bzy_MHz");
300 	outp += sprintf(outp, " TSC_MHz");
301 
302 	if (extra_delta_offset32)
303 		outp += sprintf(outp, "  count 0x%03X", extra_delta_offset32);
304 	if (extra_delta_offset64)
305 		outp += sprintf(outp, "  COUNT 0x%03X", extra_delta_offset64);
306 	if (extra_msr_offset32)
307 		outp += sprintf(outp, "   MSR 0x%03X", extra_msr_offset32);
308 	if (extra_msr_offset64)
309 		outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
310 
311 	if (!debug)
312 		goto done;
313 
314 	if (do_smi)
315 		outp += sprintf(outp, "     SMI");
316 
317 	if (do_nhm_cstates)
318 		outp += sprintf(outp, "  CPU%%c1");
319 	if (do_nhm_cstates && !do_slm_cstates)
320 		outp += sprintf(outp, "  CPU%%c3");
321 	if (do_nhm_cstates)
322 		outp += sprintf(outp, "  CPU%%c6");
323 	if (do_snb_cstates)
324 		outp += sprintf(outp, "  CPU%%c7");
325 
326 	if (do_dts)
327 		outp += sprintf(outp, " CoreTmp");
328 	if (do_ptm)
329 		outp += sprintf(outp, "  PkgTmp");
330 
331 	if (do_skl_residency) {
332 		outp += sprintf(outp, " Totl%%C0");
333 		outp += sprintf(outp, "  Any%%C0");
334 		outp += sprintf(outp, "  GFX%%C0");
335 		outp += sprintf(outp, " CPUGFX%%");
336 	}
337 
338 	if (do_pc2)
339 		outp += sprintf(outp, " Pkg%%pc2");
340 	if (do_pc3)
341 		outp += sprintf(outp, " Pkg%%pc3");
342 	if (do_pc6)
343 		outp += sprintf(outp, " Pkg%%pc6");
344 	if (do_pc7)
345 		outp += sprintf(outp, " Pkg%%pc7");
346 	if (do_c8_c9_c10) {
347 		outp += sprintf(outp, " Pkg%%pc8");
348 		outp += sprintf(outp, " Pkg%%pc9");
349 		outp += sprintf(outp, " Pk%%pc10");
350 	}
351 
352 	if (do_rapl && !rapl_joules) {
353 		if (do_rapl & RAPL_PKG)
354 			outp += sprintf(outp, " PkgWatt");
355 		if (do_rapl & RAPL_CORES)
356 			outp += sprintf(outp, " CorWatt");
357 		if (do_rapl & RAPL_GFX)
358 			outp += sprintf(outp, " GFXWatt");
359 		if (do_rapl & RAPL_DRAM)
360 			outp += sprintf(outp, " RAMWatt");
361 		if (do_rapl & RAPL_PKG_PERF_STATUS)
362 			outp += sprintf(outp, "   PKG_%%");
363 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
364 			outp += sprintf(outp, "   RAM_%%");
365 	} else if (do_rapl && rapl_joules) {
366 		if (do_rapl & RAPL_PKG)
367 			outp += sprintf(outp, "   Pkg_J");
368 		if (do_rapl & RAPL_CORES)
369 			outp += sprintf(outp, "   Cor_J");
370 		if (do_rapl & RAPL_GFX)
371 			outp += sprintf(outp, "   GFX_J");
372 		if (do_rapl & RAPL_DRAM)
373 			outp += sprintf(outp, "   RAM_W");
374 		if (do_rapl & RAPL_PKG_PERF_STATUS)
375 			outp += sprintf(outp, "   PKG_%%");
376 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
377 			outp += sprintf(outp, "   RAM_%%");
378 		outp += sprintf(outp, "   time");
379 
380 	}
381     done:
382 	outp += sprintf(outp, "\n");
383 }
384 
385 int dump_counters(struct thread_data *t, struct core_data *c,
386 	struct pkg_data *p)
387 {
388 	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
389 
390 	if (t) {
391 		outp += sprintf(outp, "CPU: %d flags 0x%x\n",
392 			t->cpu_id, t->flags);
393 		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
394 		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
395 		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
396 		outp += sprintf(outp, "c1: %016llX\n", t->c1);
397 		outp += sprintf(outp, "msr0x%x: %08llX\n",
398 			extra_delta_offset32, t->extra_delta32);
399 		outp += sprintf(outp, "msr0x%x: %016llX\n",
400 			extra_delta_offset64, t->extra_delta64);
401 		outp += sprintf(outp, "msr0x%x: %08llX\n",
402 			extra_msr_offset32, t->extra_msr32);
403 		outp += sprintf(outp, "msr0x%x: %016llX\n",
404 			extra_msr_offset64, t->extra_msr64);
405 		if (do_smi)
406 			outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
407 	}
408 
409 	if (c) {
410 		outp += sprintf(outp, "core: %d\n", c->core_id);
411 		outp += sprintf(outp, "c3: %016llX\n", c->c3);
412 		outp += sprintf(outp, "c6: %016llX\n", c->c6);
413 		outp += sprintf(outp, "c7: %016llX\n", c->c7);
414 		outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
415 	}
416 
417 	if (p) {
418 		outp += sprintf(outp, "package: %d\n", p->package_id);
419 
420 		outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
421 		outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
422 		outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
423 		outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
424 
425 		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
426 		if (do_pc3)
427 			outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
428 		if (do_pc6)
429 			outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
430 		if (do_pc7)
431 			outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
432 		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
433 		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
434 		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
435 		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
436 		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
437 		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
438 		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
439 		outp += sprintf(outp, "Throttle PKG: %0X\n",
440 			p->rapl_pkg_perf_status);
441 		outp += sprintf(outp, "Throttle RAM: %0X\n",
442 			p->rapl_dram_perf_status);
443 		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
444 	}
445 
446 	outp += sprintf(outp, "\n");
447 
448 	return 0;
449 }
450 
451 /*
452  * column formatting convention & formats
453  */
454 int format_counters(struct thread_data *t, struct core_data *c,
455 	struct pkg_data *p)
456 {
457 	double interval_float;
458 	char *fmt8;
459 
460 	 /* if showing only 1st thread in core and this isn't one, bail out */
461 	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
462 		return 0;
463 
464 	 /* if showing only 1st thread in pkg and this isn't one, bail out */
465 	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
466 		return 0;
467 
468 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
469 
470 	/* topo columns, print blanks on 1st (average) line */
471 	if (t == &average.threads) {
472 		if (show_pkg)
473 			outp += sprintf(outp, "       -");
474 		if (show_core)
475 			outp += sprintf(outp, "       -");
476 		if (show_cpu)
477 			outp += sprintf(outp, "       -");
478 	} else {
479 		if (show_pkg) {
480 			if (p)
481 				outp += sprintf(outp, "%8d", p->package_id);
482 			else
483 				outp += sprintf(outp, "       -");
484 		}
485 		if (show_core) {
486 			if (c)
487 				outp += sprintf(outp, "%8d", c->core_id);
488 			else
489 				outp += sprintf(outp, "       -");
490 		}
491 		if (show_cpu)
492 			outp += sprintf(outp, "%8d", t->cpu_id);
493 	}
494 
495 	/* Avg_MHz */
496 	if (has_aperf)
497 		outp += sprintf(outp, "%8.0f",
498 			1.0 / units * t->aperf / interval_float);
499 
500 	/* %Busy */
501 	if (has_aperf) {
502 		if (!skip_c0)
503 			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
504 		else
505 			outp += sprintf(outp, "********");
506 	}
507 
508 	/* Bzy_MHz */
509 	if (has_aperf)
510 		outp += sprintf(outp, "%8.0f",
511 			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
512 
513 	/* TSC_MHz */
514 	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
515 
516 	/* delta */
517 	if (extra_delta_offset32)
518 		outp += sprintf(outp, "  %11llu", t->extra_delta32);
519 
520 	/* DELTA */
521 	if (extra_delta_offset64)
522 		outp += sprintf(outp, "  %11llu", t->extra_delta64);
523 	/* msr */
524 	if (extra_msr_offset32)
525 		outp += sprintf(outp, "  0x%08llx", t->extra_msr32);
526 
527 	/* MSR */
528 	if (extra_msr_offset64)
529 		outp += sprintf(outp, "  0x%016llx", t->extra_msr64);
530 
531 	if (!debug)
532 		goto done;
533 
534 	/* SMI */
535 	if (do_smi)
536 		outp += sprintf(outp, "%8d", t->smi_count);
537 
538 	if (do_nhm_cstates) {
539 		if (!skip_c1)
540 			outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
541 		else
542 			outp += sprintf(outp, "********");
543 	}
544 
545 	/* print per-core data only for 1st thread in core */
546 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
547 		goto done;
548 
549 	if (do_nhm_cstates && !do_slm_cstates)
550 		outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
551 	if (do_nhm_cstates)
552 		outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
553 	if (do_snb_cstates)
554 		outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
555 
556 	if (do_dts)
557 		outp += sprintf(outp, "%8d", c->core_temp_c);
558 
559 	/* print per-package data only for 1st core in package */
560 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
561 		goto done;
562 
563 	/* PkgTmp */
564 	if (do_ptm)
565 		outp += sprintf(outp, "%8d", p->pkg_temp_c);
566 
567 	/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
568 	if (do_skl_residency) {
569 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
570 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
571 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
572 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
573 	}
574 
575 	if (do_pc2)
576 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
577 	if (do_pc3)
578 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
579 	if (do_pc6)
580 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
581 	if (do_pc7)
582 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
583 	if (do_c8_c9_c10) {
584 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
585 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
586 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
587 	}
588 
589 	/*
590  	 * If measurement interval exceeds minimum RAPL Joule Counter range,
591  	 * indicate that results are suspect by printing "**" in fraction place.
592  	 */
593 	if (interval_float < rapl_joule_counter_range)
594 		fmt8 = "%8.2f";
595 	else
596 		fmt8 = " %6.0f**";
597 
598 	if (do_rapl && !rapl_joules) {
599 		if (do_rapl & RAPL_PKG)
600 			outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
601 		if (do_rapl & RAPL_CORES)
602 			outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
603 		if (do_rapl & RAPL_GFX)
604 			outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
605 		if (do_rapl & RAPL_DRAM)
606 			outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
607 		if (do_rapl & RAPL_PKG_PERF_STATUS)
608 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
609 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
610 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
611 	} else if (do_rapl && rapl_joules) {
612 		if (do_rapl & RAPL_PKG)
613 			outp += sprintf(outp, fmt8,
614 					p->energy_pkg * rapl_energy_units);
615 		if (do_rapl & RAPL_CORES)
616 			outp += sprintf(outp, fmt8,
617 					p->energy_cores * rapl_energy_units);
618 		if (do_rapl & RAPL_GFX)
619 			outp += sprintf(outp, fmt8,
620 					p->energy_gfx * rapl_energy_units);
621 		if (do_rapl & RAPL_DRAM)
622 			outp += sprintf(outp, fmt8,
623 					p->energy_dram * rapl_dram_energy_units);
624 		if (do_rapl & RAPL_PKG_PERF_STATUS)
625 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
626 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
627 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
628 
629 		outp += sprintf(outp, fmt8, interval_float);
630 	}
631 done:
632 	outp += sprintf(outp, "\n");
633 
634 	return 0;
635 }
636 
637 void flush_stdout()
638 {
639 	fputs(output_buffer, stdout);
640 	fflush(stdout);
641 	outp = output_buffer;
642 }
643 void flush_stderr()
644 {
645 	fputs(output_buffer, stderr);
646 	outp = output_buffer;
647 }
648 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
649 {
650 	static int printed;
651 
652 	if (!printed || !summary_only)
653 		print_header();
654 
655 	if (topo.num_cpus > 1)
656 		format_counters(&average.threads, &average.cores,
657 			&average.packages);
658 
659 	printed = 1;
660 
661 	if (summary_only)
662 		return;
663 
664 	for_all_cpus(format_counters, t, c, p);
665 }
666 
667 #define DELTA_WRAP32(new, old)			\
668 	if (new > old) {			\
669 		old = new - old;		\
670 	} else {				\
671 		old = 0x100000000 + new - old;	\
672 	}
673 
674 void
675 delta_package(struct pkg_data *new, struct pkg_data *old)
676 {
677 
678 	if (do_skl_residency) {
679 		old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
680 		old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
681 		old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
682 		old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
683 	}
684 	old->pc2 = new->pc2 - old->pc2;
685 	if (do_pc3)
686 		old->pc3 = new->pc3 - old->pc3;
687 	if (do_pc6)
688 		old->pc6 = new->pc6 - old->pc6;
689 	if (do_pc7)
690 		old->pc7 = new->pc7 - old->pc7;
691 	old->pc8 = new->pc8 - old->pc8;
692 	old->pc9 = new->pc9 - old->pc9;
693 	old->pc10 = new->pc10 - old->pc10;
694 	old->pkg_temp_c = new->pkg_temp_c;
695 
696 	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
697 	DELTA_WRAP32(new->energy_cores, old->energy_cores);
698 	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
699 	DELTA_WRAP32(new->energy_dram, old->energy_dram);
700 	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
701 	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
702 }
703 
704 void
705 delta_core(struct core_data *new, struct core_data *old)
706 {
707 	old->c3 = new->c3 - old->c3;
708 	old->c6 = new->c6 - old->c6;
709 	old->c7 = new->c7 - old->c7;
710 	old->core_temp_c = new->core_temp_c;
711 }
712 
713 /*
714  * old = new - old
715  */
716 void
717 delta_thread(struct thread_data *new, struct thread_data *old,
718 	struct core_data *core_delta)
719 {
720 	old->tsc = new->tsc - old->tsc;
721 
722 	/* check for TSC < 1 Mcycles over interval */
723 	if (old->tsc < (1000 * 1000))
724 		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
725 		     "You can disable all c-states by booting with \"idle=poll\"\n"
726 		     "or just the deep ones with \"processor.max_cstate=1\"");
727 
728 	old->c1 = new->c1 - old->c1;
729 
730 	if (has_aperf) {
731 		if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
732 			old->aperf = new->aperf - old->aperf;
733 			old->mperf = new->mperf - old->mperf;
734 		} else {
735 
736 			if (!aperf_mperf_unstable) {
737 				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
738 				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
739 				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
740 
741 				aperf_mperf_unstable = 1;
742 			}
743 			/*
744 			 * mperf delta is likely a huge "positive" number
745 			 * can not use it for calculating c0 time
746 			 */
747 			skip_c0 = 1;
748 			skip_c1 = 1;
749 		}
750 	}
751 
752 
753 	if (use_c1_residency_msr) {
754 		/*
755 		 * Some models have a dedicated C1 residency MSR,
756 		 * which should be more accurate than the derivation below.
757 		 */
758 	} else {
759 		/*
760 		 * As counter collection is not atomic,
761 		 * it is possible for mperf's non-halted cycles + idle states
762 		 * to exceed TSC's all cycles: show c1 = 0% in that case.
763 		 */
764 		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
765 			old->c1 = 0;
766 		else {
767 			/* normal case, derive c1 */
768 			old->c1 = old->tsc - old->mperf - core_delta->c3
769 				- core_delta->c6 - core_delta->c7;
770 		}
771 	}
772 
773 	if (old->mperf == 0) {
774 		if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
775 		old->mperf = 1;	/* divide by 0 protection */
776 	}
777 
778 	old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
779 	old->extra_delta32 &= 0xFFFFFFFF;
780 
781 	old->extra_delta64 = new->extra_delta64 - old->extra_delta64;
782 
783 	/*
784 	 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
785 	 */
786 	old->extra_msr32 = new->extra_msr32;
787 	old->extra_msr64 = new->extra_msr64;
788 
789 	if (do_smi)
790 		old->smi_count = new->smi_count - old->smi_count;
791 }
792 
793 int delta_cpu(struct thread_data *t, struct core_data *c,
794 	struct pkg_data *p, struct thread_data *t2,
795 	struct core_data *c2, struct pkg_data *p2)
796 {
797 	/* calculate core delta only for 1st thread in core */
798 	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
799 		delta_core(c, c2);
800 
801 	/* always calculate thread delta */
802 	delta_thread(t, t2, c2);	/* c2 is core delta */
803 
804 	/* calculate package delta only for 1st core in package */
805 	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
806 		delta_package(p, p2);
807 
808 	return 0;
809 }
810 
811 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
812 {
813 	t->tsc = 0;
814 	t->aperf = 0;
815 	t->mperf = 0;
816 	t->c1 = 0;
817 
818 	t->smi_count = 0;
819 	t->extra_delta32 = 0;
820 	t->extra_delta64 = 0;
821 
822 	/* tells format_counters to dump all fields from this set */
823 	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
824 
825 	c->c3 = 0;
826 	c->c6 = 0;
827 	c->c7 = 0;
828 	c->core_temp_c = 0;
829 
830 	p->pkg_wtd_core_c0 = 0;
831 	p->pkg_any_core_c0 = 0;
832 	p->pkg_any_gfxe_c0 = 0;
833 	p->pkg_both_core_gfxe_c0 = 0;
834 
835 	p->pc2 = 0;
836 	if (do_pc3)
837 		p->pc3 = 0;
838 	if (do_pc6)
839 		p->pc6 = 0;
840 	if (do_pc7)
841 		p->pc7 = 0;
842 	p->pc8 = 0;
843 	p->pc9 = 0;
844 	p->pc10 = 0;
845 
846 	p->energy_pkg = 0;
847 	p->energy_dram = 0;
848 	p->energy_cores = 0;
849 	p->energy_gfx = 0;
850 	p->rapl_pkg_perf_status = 0;
851 	p->rapl_dram_perf_status = 0;
852 	p->pkg_temp_c = 0;
853 }
854 int sum_counters(struct thread_data *t, struct core_data *c,
855 	struct pkg_data *p)
856 {
857 	average.threads.tsc += t->tsc;
858 	average.threads.aperf += t->aperf;
859 	average.threads.mperf += t->mperf;
860 	average.threads.c1 += t->c1;
861 
862 	average.threads.extra_delta32 += t->extra_delta32;
863 	average.threads.extra_delta64 += t->extra_delta64;
864 
865 	/* sum per-core values only for 1st thread in core */
866 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
867 		return 0;
868 
869 	average.cores.c3 += c->c3;
870 	average.cores.c6 += c->c6;
871 	average.cores.c7 += c->c7;
872 
873 	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
874 
875 	/* sum per-pkg values only for 1st core in pkg */
876 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
877 		return 0;
878 
879 	if (do_skl_residency) {
880 		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
881 		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
882 		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
883 		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
884 	}
885 
886 	average.packages.pc2 += p->pc2;
887 	if (do_pc3)
888 		average.packages.pc3 += p->pc3;
889 	if (do_pc6)
890 		average.packages.pc6 += p->pc6;
891 	if (do_pc7)
892 		average.packages.pc7 += p->pc7;
893 	average.packages.pc8 += p->pc8;
894 	average.packages.pc9 += p->pc9;
895 	average.packages.pc10 += p->pc10;
896 
897 	average.packages.energy_pkg += p->energy_pkg;
898 	average.packages.energy_dram += p->energy_dram;
899 	average.packages.energy_cores += p->energy_cores;
900 	average.packages.energy_gfx += p->energy_gfx;
901 
902 	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
903 
904 	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
905 	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
906 	return 0;
907 }
908 /*
909  * sum the counters for all cpus in the system
910  * compute the weighted average
911  */
912 void compute_average(struct thread_data *t, struct core_data *c,
913 	struct pkg_data *p)
914 {
915 	clear_counters(&average.threads, &average.cores, &average.packages);
916 
917 	for_all_cpus(sum_counters, t, c, p);
918 
919 	average.threads.tsc /= topo.num_cpus;
920 	average.threads.aperf /= topo.num_cpus;
921 	average.threads.mperf /= topo.num_cpus;
922 	average.threads.c1 /= topo.num_cpus;
923 
924 	average.threads.extra_delta32 /= topo.num_cpus;
925 	average.threads.extra_delta32 &= 0xFFFFFFFF;
926 
927 	average.threads.extra_delta64 /= topo.num_cpus;
928 
929 	average.cores.c3 /= topo.num_cores;
930 	average.cores.c6 /= topo.num_cores;
931 	average.cores.c7 /= topo.num_cores;
932 
933 	if (do_skl_residency) {
934 		average.packages.pkg_wtd_core_c0 /= topo.num_packages;
935 		average.packages.pkg_any_core_c0 /= topo.num_packages;
936 		average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
937 		average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
938 	}
939 
940 	average.packages.pc2 /= topo.num_packages;
941 	if (do_pc3)
942 		average.packages.pc3 /= topo.num_packages;
943 	if (do_pc6)
944 		average.packages.pc6 /= topo.num_packages;
945 	if (do_pc7)
946 		average.packages.pc7 /= topo.num_packages;
947 
948 	average.packages.pc8 /= topo.num_packages;
949 	average.packages.pc9 /= topo.num_packages;
950 	average.packages.pc10 /= topo.num_packages;
951 }
952 
953 static unsigned long long rdtsc(void)
954 {
955 	unsigned int low, high;
956 
957 	asm volatile("rdtsc" : "=a" (low), "=d" (high));
958 
959 	return low | ((unsigned long long)high) << 32;
960 }
961 
962 
963 /*
964  * get_counters(...)
965  * migrate to cpu
966  * acquire and record local counters for that cpu
967  */
968 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
969 {
970 	int cpu = t->cpu_id;
971 	unsigned long long msr;
972 
973 	if (cpu_migrate(cpu)) {
974 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
975 		return -1;
976 	}
977 
978 	t->tsc = rdtsc();	/* we are running on local CPU of interest */
979 
980 	if (has_aperf) {
981 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
982 			return -3;
983 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
984 			return -4;
985 	}
986 
987 	if (do_smi) {
988 		if (get_msr(cpu, MSR_SMI_COUNT, &msr))
989 			return -5;
990 		t->smi_count = msr & 0xFFFFFFFF;
991 	}
992 	if (extra_delta_offset32) {
993 		if (get_msr(cpu, extra_delta_offset32, &msr))
994 			return -5;
995 		t->extra_delta32 = msr & 0xFFFFFFFF;
996 	}
997 
998 	if (extra_delta_offset64)
999 		if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
1000 			return -5;
1001 
1002 	if (extra_msr_offset32) {
1003 		if (get_msr(cpu, extra_msr_offset32, &msr))
1004 			return -5;
1005 		t->extra_msr32 = msr & 0xFFFFFFFF;
1006 	}
1007 
1008 	if (extra_msr_offset64)
1009 		if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
1010 			return -5;
1011 
1012 	if (use_c1_residency_msr) {
1013 		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1014 			return -6;
1015 	}
1016 
1017 	/* collect core counters only for 1st thread in core */
1018 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1019 		return 0;
1020 
1021 	if (do_nhm_cstates && !do_slm_cstates) {
1022 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1023 			return -6;
1024 	}
1025 
1026 	if (do_nhm_cstates) {
1027 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1028 			return -7;
1029 	}
1030 
1031 	if (do_snb_cstates)
1032 		if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1033 			return -8;
1034 
1035 	if (do_dts) {
1036 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1037 			return -9;
1038 		c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1039 	}
1040 
1041 
1042 	/* collect package counters only for 1st core in package */
1043 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1044 		return 0;
1045 
1046 	if (do_skl_residency) {
1047 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1048 			return -10;
1049 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1050 			return -11;
1051 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1052 			return -12;
1053 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1054 			return -13;
1055 	}
1056 	if (do_pc3)
1057 		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1058 			return -9;
1059 	if (do_pc6)
1060 		if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1061 			return -10;
1062 	if (do_pc2)
1063 		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1064 			return -11;
1065 	if (do_pc7)
1066 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1067 			return -12;
1068 	if (do_c8_c9_c10) {
1069 		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1070 			return -13;
1071 		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1072 			return -13;
1073 		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1074 			return -13;
1075 	}
1076 	if (do_rapl & RAPL_PKG) {
1077 		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1078 			return -13;
1079 		p->energy_pkg = msr & 0xFFFFFFFF;
1080 	}
1081 	if (do_rapl & RAPL_CORES) {
1082 		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1083 			return -14;
1084 		p->energy_cores = msr & 0xFFFFFFFF;
1085 	}
1086 	if (do_rapl & RAPL_DRAM) {
1087 		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1088 			return -15;
1089 		p->energy_dram = msr & 0xFFFFFFFF;
1090 	}
1091 	if (do_rapl & RAPL_GFX) {
1092 		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1093 			return -16;
1094 		p->energy_gfx = msr & 0xFFFFFFFF;
1095 	}
1096 	if (do_rapl & RAPL_PKG_PERF_STATUS) {
1097 		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1098 			return -16;
1099 		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1100 	}
1101 	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1102 		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1103 			return -16;
1104 		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1105 	}
1106 	if (do_ptm) {
1107 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1108 			return -17;
1109 		p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1110 	}
1111 	return 0;
1112 }
1113 
1114 /*
1115  * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1116  * If you change the values, note they are used both in comparisons
1117  * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1118  */
1119 
1120 #define PCLUKN 0 /* Unknown */
1121 #define PCLRSV 1 /* Reserved */
1122 #define PCL__0 2 /* PC0 */
1123 #define PCL__1 3 /* PC1 */
1124 #define PCL__2 4 /* PC2 */
1125 #define PCL__3 5 /* PC3 */
1126 #define PCL__4 6 /* PC4 */
1127 #define PCL__6 7 /* PC6 */
1128 #define PCL_6N 8 /* PC6 No Retention */
1129 #define PCL_6R 9 /* PC6 Retention */
1130 #define PCL__7 10 /* PC7 */
1131 #define PCL_7S 11 /* PC7 Shrink */
1132 #define PCL__8 12 /* PC8 */
1133 #define PCL__9 13 /* PC9 */
1134 #define PCLUNL 14 /* Unlimited */
1135 
1136 int pkg_cstate_limit = PCLUKN;
1137 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
1138 	"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1139 
1140 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1141 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1142 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1143 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1144 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1145 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1146 
1147 static void
1148 dump_nhm_platform_info(void)
1149 {
1150 	unsigned long long msr;
1151 	unsigned int ratio;
1152 
1153 	get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
1154 
1155 	fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
1156 
1157 	ratio = (msr >> 40) & 0xFF;
1158 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
1159 		ratio, bclk, ratio * bclk);
1160 
1161 	ratio = (msr >> 8) & 0xFF;
1162 	fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
1163 		ratio, bclk, ratio * bclk);
1164 
1165 	get_msr(0, MSR_IA32_POWER_CTL, &msr);
1166 	fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1167 		msr, msr & 0x2 ? "EN" : "DIS");
1168 
1169 	return;
1170 }
1171 
1172 static void
1173 dump_hsw_turbo_ratio_limits(void)
1174 {
1175 	unsigned long long msr;
1176 	unsigned int ratio;
1177 
1178 	get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr);
1179 
1180 	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
1181 
1182 	ratio = (msr >> 8) & 0xFF;
1183 	if (ratio)
1184 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
1185 			ratio, bclk, ratio * bclk);
1186 
1187 	ratio = (msr >> 0) & 0xFF;
1188 	if (ratio)
1189 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
1190 			ratio, bclk, ratio * bclk);
1191 	return;
1192 }
1193 
1194 static void
1195 dump_ivt_turbo_ratio_limits(void)
1196 {
1197 	unsigned long long msr;
1198 	unsigned int ratio;
1199 
1200 	get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr);
1201 
1202 	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
1203 
1204 	ratio = (msr >> 56) & 0xFF;
1205 	if (ratio)
1206 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
1207 			ratio, bclk, ratio * bclk);
1208 
1209 	ratio = (msr >> 48) & 0xFF;
1210 	if (ratio)
1211 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
1212 			ratio, bclk, ratio * bclk);
1213 
1214 	ratio = (msr >> 40) & 0xFF;
1215 	if (ratio)
1216 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
1217 			ratio, bclk, ratio * bclk);
1218 
1219 	ratio = (msr >> 32) & 0xFF;
1220 	if (ratio)
1221 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
1222 			ratio, bclk, ratio * bclk);
1223 
1224 	ratio = (msr >> 24) & 0xFF;
1225 	if (ratio)
1226 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
1227 			ratio, bclk, ratio * bclk);
1228 
1229 	ratio = (msr >> 16) & 0xFF;
1230 	if (ratio)
1231 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
1232 			ratio, bclk, ratio * bclk);
1233 
1234 	ratio = (msr >> 8) & 0xFF;
1235 	if (ratio)
1236 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
1237 			ratio, bclk, ratio * bclk);
1238 
1239 	ratio = (msr >> 0) & 0xFF;
1240 	if (ratio)
1241 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
1242 			ratio, bclk, ratio * bclk);
1243 	return;
1244 }
1245 
1246 static void
1247 dump_nhm_turbo_ratio_limits(void)
1248 {
1249 	unsigned long long msr;
1250 	unsigned int ratio;
1251 
1252 	get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr);
1253 
1254 	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
1255 
1256 	ratio = (msr >> 56) & 0xFF;
1257 	if (ratio)
1258 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
1259 			ratio, bclk, ratio * bclk);
1260 
1261 	ratio = (msr >> 48) & 0xFF;
1262 	if (ratio)
1263 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
1264 			ratio, bclk, ratio * bclk);
1265 
1266 	ratio = (msr >> 40) & 0xFF;
1267 	if (ratio)
1268 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
1269 			ratio, bclk, ratio * bclk);
1270 
1271 	ratio = (msr >> 32) & 0xFF;
1272 	if (ratio)
1273 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
1274 			ratio, bclk, ratio * bclk);
1275 
1276 	ratio = (msr >> 24) & 0xFF;
1277 	if (ratio)
1278 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
1279 			ratio, bclk, ratio * bclk);
1280 
1281 	ratio = (msr >> 16) & 0xFF;
1282 	if (ratio)
1283 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
1284 			ratio, bclk, ratio * bclk);
1285 
1286 	ratio = (msr >> 8) & 0xFF;
1287 	if (ratio)
1288 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
1289 			ratio, bclk, ratio * bclk);
1290 
1291 	ratio = (msr >> 0) & 0xFF;
1292 	if (ratio)
1293 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1294 			ratio, bclk, ratio * bclk);
1295 	return;
1296 }
1297 
1298 static void
1299 dump_nhm_cst_cfg(void)
1300 {
1301 	unsigned long long msr;
1302 
1303 	get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1304 
1305 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
1306 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
1307 
1308 	fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
1309 
1310 	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
1311 		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
1312 		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
1313 		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
1314 		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
1315 		(msr & (1 << 15)) ? "" : "UN",
1316 		(unsigned int)msr & 7,
1317 		pkg_cstate_limit_strings[pkg_cstate_limit]);
1318 	return;
1319 }
1320 
1321 void free_all_buffers(void)
1322 {
1323 	CPU_FREE(cpu_present_set);
1324 	cpu_present_set = NULL;
1325 	cpu_present_set = 0;
1326 
1327 	CPU_FREE(cpu_affinity_set);
1328 	cpu_affinity_set = NULL;
1329 	cpu_affinity_setsize = 0;
1330 
1331 	free(thread_even);
1332 	free(core_even);
1333 	free(package_even);
1334 
1335 	thread_even = NULL;
1336 	core_even = NULL;
1337 	package_even = NULL;
1338 
1339 	free(thread_odd);
1340 	free(core_odd);
1341 	free(package_odd);
1342 
1343 	thread_odd = NULL;
1344 	core_odd = NULL;
1345 	package_odd = NULL;
1346 
1347 	free(output_buffer);
1348 	output_buffer = NULL;
1349 	outp = NULL;
1350 }
1351 
1352 /*
1353  * Open a file, and exit on failure
1354  */
1355 FILE *fopen_or_die(const char *path, const char *mode)
1356 {
1357 	FILE *filep = fopen(path, "r");
1358 	if (!filep)
1359 		err(1, "%s: open failed", path);
1360 	return filep;
1361 }
1362 
1363 /*
1364  * Parse a file containing a single int.
1365  */
1366 int parse_int_file(const char *fmt, ...)
1367 {
1368 	va_list args;
1369 	char path[PATH_MAX];
1370 	FILE *filep;
1371 	int value;
1372 
1373 	va_start(args, fmt);
1374 	vsnprintf(path, sizeof(path), fmt, args);
1375 	va_end(args);
1376 	filep = fopen_or_die(path, "r");
1377 	if (fscanf(filep, "%d", &value) != 1)
1378 		err(1, "%s: failed to parse number from file", path);
1379 	fclose(filep);
1380 	return value;
1381 }
1382 
1383 /*
1384  * cpu_is_first_sibling_in_core(cpu)
1385  * return 1 if given CPU is 1st HT sibling in the core
1386  */
1387 int cpu_is_first_sibling_in_core(int cpu)
1388 {
1389 	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1390 }
1391 
1392 /*
1393  * cpu_is_first_core_in_package(cpu)
1394  * return 1 if given CPU is 1st core in package
1395  */
1396 int cpu_is_first_core_in_package(int cpu)
1397 {
1398 	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
1399 }
1400 
1401 int get_physical_package_id(int cpu)
1402 {
1403 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
1404 }
1405 
1406 int get_core_id(int cpu)
1407 {
1408 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
1409 }
1410 
1411 int get_num_ht_siblings(int cpu)
1412 {
1413 	char path[80];
1414 	FILE *filep;
1415 	int sib1, sib2;
1416 	int matches;
1417 	char character;
1418 
1419 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1420 	filep = fopen_or_die(path, "r");
1421 	/*
1422 	 * file format:
1423 	 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
1424 	 * otherwinse 1 sibling (self).
1425 	 */
1426 	matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
1427 
1428 	fclose(filep);
1429 
1430 	if (matches == 3)
1431 		return 2;
1432 	else
1433 		return 1;
1434 }
1435 
1436 /*
1437  * run func(thread, core, package) in topology order
1438  * skip non-present cpus
1439  */
1440 
1441 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
1442 	struct pkg_data *, struct thread_data *, struct core_data *,
1443 	struct pkg_data *), struct thread_data *thread_base,
1444 	struct core_data *core_base, struct pkg_data *pkg_base,
1445 	struct thread_data *thread_base2, struct core_data *core_base2,
1446 	struct pkg_data *pkg_base2)
1447 {
1448 	int retval, pkg_no, core_no, thread_no;
1449 
1450 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
1451 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
1452 			for (thread_no = 0; thread_no <
1453 				topo.num_threads_per_core; ++thread_no) {
1454 				struct thread_data *t, *t2;
1455 				struct core_data *c, *c2;
1456 				struct pkg_data *p, *p2;
1457 
1458 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
1459 
1460 				if (cpu_is_not_present(t->cpu_id))
1461 					continue;
1462 
1463 				t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
1464 
1465 				c = GET_CORE(core_base, core_no, pkg_no);
1466 				c2 = GET_CORE(core_base2, core_no, pkg_no);
1467 
1468 				p = GET_PKG(pkg_base, pkg_no);
1469 				p2 = GET_PKG(pkg_base2, pkg_no);
1470 
1471 				retval = func(t, c, p, t2, c2, p2);
1472 				if (retval)
1473 					return retval;
1474 			}
1475 		}
1476 	}
1477 	return 0;
1478 }
1479 
1480 /*
1481  * run func(cpu) on every cpu in /proc/stat
1482  * return max_cpu number
1483  */
1484 int for_all_proc_cpus(int (func)(int))
1485 {
1486 	FILE *fp;
1487 	int cpu_num;
1488 	int retval;
1489 
1490 	fp = fopen_or_die(proc_stat, "r");
1491 
1492 	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1493 	if (retval != 0)
1494 		err(1, "%s: failed to parse format", proc_stat);
1495 
1496 	while (1) {
1497 		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1498 		if (retval != 1)
1499 			break;
1500 
1501 		retval = func(cpu_num);
1502 		if (retval) {
1503 			fclose(fp);
1504 			return(retval);
1505 		}
1506 	}
1507 	fclose(fp);
1508 	return 0;
1509 }
1510 
1511 void re_initialize(void)
1512 {
1513 	free_all_buffers();
1514 	setup_all_buffers();
1515 	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
1516 }
1517 
1518 
1519 /*
1520  * count_cpus()
1521  * remember the last one seen, it will be the max
1522  */
1523 int count_cpus(int cpu)
1524 {
1525 	if (topo.max_cpu_num < cpu)
1526 		topo.max_cpu_num = cpu;
1527 
1528 	topo.num_cpus += 1;
1529 	return 0;
1530 }
1531 int mark_cpu_present(int cpu)
1532 {
1533 	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1534 	return 0;
1535 }
1536 
1537 void turbostat_loop()
1538 {
1539 	int retval;
1540 	int restarted = 0;
1541 
1542 restart:
1543 	restarted++;
1544 
1545 	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1546 	if (retval < -1) {
1547 		exit(retval);
1548 	} else if (retval == -1) {
1549 		if (restarted > 1) {
1550 			exit(retval);
1551 		}
1552 		re_initialize();
1553 		goto restart;
1554 	}
1555 	restarted = 0;
1556 	gettimeofday(&tv_even, (struct timezone *)NULL);
1557 
1558 	while (1) {
1559 		if (for_all_proc_cpus(cpu_is_not_present)) {
1560 			re_initialize();
1561 			goto restart;
1562 		}
1563 		sleep(interval_sec);
1564 		retval = for_all_cpus(get_counters, ODD_COUNTERS);
1565 		if (retval < -1) {
1566 			exit(retval);
1567 		} else if (retval == -1) {
1568 			re_initialize();
1569 			goto restart;
1570 		}
1571 		gettimeofday(&tv_odd, (struct timezone *)NULL);
1572 		timersub(&tv_odd, &tv_even, &tv_delta);
1573 		for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
1574 		compute_average(EVEN_COUNTERS);
1575 		format_all_counters(EVEN_COUNTERS);
1576 		flush_stdout();
1577 		sleep(interval_sec);
1578 		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1579 		if (retval < -1) {
1580 			exit(retval);
1581 		} else if (retval == -1) {
1582 			re_initialize();
1583 			goto restart;
1584 		}
1585 		gettimeofday(&tv_even, (struct timezone *)NULL);
1586 		timersub(&tv_even, &tv_odd, &tv_delta);
1587 		for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
1588 		compute_average(ODD_COUNTERS);
1589 		format_all_counters(ODD_COUNTERS);
1590 		flush_stdout();
1591 	}
1592 }
1593 
1594 void check_dev_msr()
1595 {
1596 	struct stat sb;
1597 
1598 	if (stat("/dev/cpu/0/msr", &sb))
1599  		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1600 			err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1601 }
1602 
1603 void check_permissions()
1604 {
1605 	struct __user_cap_header_struct cap_header_data;
1606 	cap_user_header_t cap_header = &cap_header_data;
1607 	struct __user_cap_data_struct cap_data_data;
1608 	cap_user_data_t cap_data = &cap_data_data;
1609 	extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1610 	int do_exit = 0;
1611 
1612 	/* check for CAP_SYS_RAWIO */
1613 	cap_header->pid = getpid();
1614 	cap_header->version = _LINUX_CAPABILITY_VERSION;
1615 	if (capget(cap_header, cap_data) < 0)
1616 		err(-6, "capget(2) failed");
1617 
1618 	if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1619 		do_exit++;
1620 		warnx("capget(CAP_SYS_RAWIO) failed,"
1621 			" try \"# setcap cap_sys_rawio=ep %s\"", progname);
1622 	}
1623 
1624 	/* test file permissions */
1625 	if (euidaccess("/dev/cpu/0/msr", R_OK)) {
1626 		do_exit++;
1627 		warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
1628 	}
1629 
1630 	/* if all else fails, thell them to be root */
1631 	if (do_exit)
1632 		if (getuid() != 0)
1633 			warnx("... or simply run as root");
1634 
1635 	if (do_exit)
1636 		exit(-6);
1637 }
1638 
1639 /*
1640  * NHM adds support for additional MSRs:
1641  *
1642  * MSR_SMI_COUNT                   0x00000034
1643  *
1644  * MSR_NHM_PLATFORM_INFO           0x000000ce
1645  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
1646  *
1647  * MSR_PKG_C3_RESIDENCY            0x000003f8
1648  * MSR_PKG_C6_RESIDENCY            0x000003f9
1649  * MSR_CORE_C3_RESIDENCY           0x000003fc
1650  * MSR_CORE_C6_RESIDENCY           0x000003fd
1651  *
1652  * Side effect:
1653  * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
1654  */
1655 int probe_nhm_msrs(unsigned int family, unsigned int model)
1656 {
1657 	unsigned long long msr;
1658 	int *pkg_cstate_limits;
1659 
1660 	if (!genuine_intel)
1661 		return 0;
1662 
1663 	if (family != 6)
1664 		return 0;
1665 
1666 	switch (model) {
1667 	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
1668 	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1669 	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
1670 	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
1671 	case 0x2C:	/* Westmere EP - Gulftown */
1672 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
1673 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
1674 		pkg_cstate_limits = nhm_pkg_cstate_limits;
1675 		break;
1676 	case 0x2A:	/* SNB */
1677 	case 0x2D:	/* SNB Xeon */
1678 	case 0x3A:	/* IVB */
1679 	case 0x3E:	/* IVB Xeon */
1680 		pkg_cstate_limits = snb_pkg_cstate_limits;
1681 		break;
1682 	case 0x3C:	/* HSW */
1683 	case 0x3F:	/* HSX */
1684 	case 0x45:	/* HSW */
1685 	case 0x46:	/* HSW */
1686 	case 0x3D:	/* BDW */
1687 	case 0x47:	/* BDW */
1688 	case 0x4F:	/* BDX */
1689 	case 0x56:	/* BDX-DE */
1690 	case 0x4E:	/* SKL */
1691 	case 0x5E:	/* SKL */
1692 		pkg_cstate_limits = hsw_pkg_cstate_limits;
1693 		break;
1694 	case 0x37:	/* BYT */
1695 	case 0x4D:	/* AVN */
1696 		pkg_cstate_limits = slv_pkg_cstate_limits;
1697 		break;
1698 	case 0x4C:	/* AMT */
1699 		pkg_cstate_limits = amt_pkg_cstate_limits;
1700 		break;
1701 	case 0x57:	/* PHI */
1702 		pkg_cstate_limits = phi_pkg_cstate_limits;
1703 		break;
1704 	default:
1705 		return 0;
1706 	}
1707 	get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1708 
1709 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
1710 
1711 	return 1;
1712 }
1713 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
1714 {
1715 	switch (model) {
1716 	/* Nehalem compatible, but do not include turbo-ratio limit support */
1717 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
1718 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
1719 		return 0;
1720 	default:
1721 		return 1;
1722 	}
1723 }
1724 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
1725 {
1726 	if (!genuine_intel)
1727 		return 0;
1728 
1729 	if (family != 6)
1730 		return 0;
1731 
1732 	switch (model) {
1733 	case 0x3E:	/* IVB Xeon */
1734 	case 0x3F:	/* HSW Xeon */
1735 		return 1;
1736 	default:
1737 		return 0;
1738 	}
1739 }
1740 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
1741 {
1742 	if (!genuine_intel)
1743 		return 0;
1744 
1745 	if (family != 6)
1746 		return 0;
1747 
1748 	switch (model) {
1749 	case 0x3F:	/* HSW Xeon */
1750 		return 1;
1751 	default:
1752 		return 0;
1753 	}
1754 }
1755 
1756 static void
1757 dump_cstate_pstate_config_info(family, model)
1758 {
1759 	if (!do_nhm_platform_info)
1760 		return;
1761 
1762 	dump_nhm_platform_info();
1763 
1764 	if (has_hsw_turbo_ratio_limit(family, model))
1765 		dump_hsw_turbo_ratio_limits();
1766 
1767 	if (has_ivt_turbo_ratio_limit(family, model))
1768 		dump_ivt_turbo_ratio_limits();
1769 
1770 	if (has_nhm_turbo_ratio_limit(family, model))
1771 		dump_nhm_turbo_ratio_limits();
1772 
1773 	dump_nhm_cst_cfg();
1774 }
1775 
1776 
1777 /*
1778  * print_epb()
1779  * Decode the ENERGY_PERF_BIAS MSR
1780  */
1781 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1782 {
1783 	unsigned long long msr;
1784 	char *epb_string;
1785 	int cpu;
1786 
1787 	if (!has_epb)
1788 		return 0;
1789 
1790 	cpu = t->cpu_id;
1791 
1792 	/* EPB is per-package */
1793 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1794 		return 0;
1795 
1796 	if (cpu_migrate(cpu)) {
1797 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
1798 		return -1;
1799 	}
1800 
1801 	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
1802 		return 0;
1803 
1804 	switch (msr & 0x7) {
1805 	case ENERGY_PERF_BIAS_PERFORMANCE:
1806 		epb_string = "performance";
1807 		break;
1808 	case ENERGY_PERF_BIAS_NORMAL:
1809 		epb_string = "balanced";
1810 		break;
1811 	case ENERGY_PERF_BIAS_POWERSAVE:
1812 		epb_string = "powersave";
1813 		break;
1814 	default:
1815 		epb_string = "custom";
1816 		break;
1817 	}
1818 	fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
1819 
1820 	return 0;
1821 }
1822 
1823 /*
1824  * print_perf_limit()
1825  */
1826 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1827 {
1828 	unsigned long long msr;
1829 	int cpu;
1830 
1831 	cpu = t->cpu_id;
1832 
1833 	/* per-package */
1834 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1835 		return 0;
1836 
1837 	if (cpu_migrate(cpu)) {
1838 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
1839 		return -1;
1840 	}
1841 
1842 	if (do_core_perf_limit_reasons) {
1843 		get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
1844 		fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1845 		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
1846 			(msr & 1 << 15) ? "bit15, " : "",
1847 			(msr & 1 << 14) ? "bit14, " : "",
1848 			(msr & 1 << 13) ? "Transitions, " : "",
1849 			(msr & 1 << 12) ? "MultiCoreTurbo, " : "",
1850 			(msr & 1 << 11) ? "PkgPwrL2, " : "",
1851 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
1852 			(msr & 1 << 9) ? "CorePwr, " : "",
1853 			(msr & 1 << 8) ? "Amps, " : "",
1854 			(msr & 1 << 6) ? "VR-Therm, " : "",
1855 			(msr & 1 << 5) ? "Auto-HWP, " : "",
1856 			(msr & 1 << 4) ? "Graphics, " : "",
1857 			(msr & 1 << 2) ? "bit2, " : "",
1858 			(msr & 1 << 1) ? "ThermStatus, " : "",
1859 			(msr & 1 << 0) ? "PROCHOT, " : "");
1860 		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
1861 			(msr & 1 << 31) ? "bit31, " : "",
1862 			(msr & 1 << 30) ? "bit30, " : "",
1863 			(msr & 1 << 29) ? "Transitions, " : "",
1864 			(msr & 1 << 28) ? "MultiCoreTurbo, " : "",
1865 			(msr & 1 << 27) ? "PkgPwrL2, " : "",
1866 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
1867 			(msr & 1 << 25) ? "CorePwr, " : "",
1868 			(msr & 1 << 24) ? "Amps, " : "",
1869 			(msr & 1 << 22) ? "VR-Therm, " : "",
1870 			(msr & 1 << 21) ? "Auto-HWP, " : "",
1871 			(msr & 1 << 20) ? "Graphics, " : "",
1872 			(msr & 1 << 18) ? "bit18, " : "",
1873 			(msr & 1 << 17) ? "ThermStatus, " : "",
1874 			(msr & 1 << 16) ? "PROCHOT, " : "");
1875 
1876 	}
1877 	if (do_gfx_perf_limit_reasons) {
1878 		get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
1879 		fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1880 		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
1881 			(msr & 1 << 0) ? "PROCHOT, " : "",
1882 			(msr & 1 << 1) ? "ThermStatus, " : "",
1883 			(msr & 1 << 4) ? "Graphics, " : "",
1884 			(msr & 1 << 6) ? "VR-Therm, " : "",
1885 			(msr & 1 << 8) ? "Amps, " : "",
1886 			(msr & 1 << 9) ? "GFXPwr, " : "",
1887 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
1888 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
1889 		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
1890 			(msr & 1 << 16) ? "PROCHOT, " : "",
1891 			(msr & 1 << 17) ? "ThermStatus, " : "",
1892 			(msr & 1 << 20) ? "Graphics, " : "",
1893 			(msr & 1 << 22) ? "VR-Therm, " : "",
1894 			(msr & 1 << 24) ? "Amps, " : "",
1895 			(msr & 1 << 25) ? "GFXPwr, " : "",
1896 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
1897 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
1898 	}
1899 	if (do_ring_perf_limit_reasons) {
1900 		get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
1901 		fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1902 		fprintf(stderr, " (Active: %s%s%s%s%s%s)",
1903 			(msr & 1 << 0) ? "PROCHOT, " : "",
1904 			(msr & 1 << 1) ? "ThermStatus, " : "",
1905 			(msr & 1 << 6) ? "VR-Therm, " : "",
1906 			(msr & 1 << 8) ? "Amps, " : "",
1907 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
1908 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
1909 		fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
1910 			(msr & 1 << 16) ? "PROCHOT, " : "",
1911 			(msr & 1 << 17) ? "ThermStatus, " : "",
1912 			(msr & 1 << 22) ? "VR-Therm, " : "",
1913 			(msr & 1 << 24) ? "Amps, " : "",
1914 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
1915 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
1916 	}
1917 	return 0;
1918 }
1919 
1920 #define	RAPL_POWER_GRANULARITY	0x7FFF	/* 15 bit power granularity */
1921 #define	RAPL_TIME_GRANULARITY	0x3F /* 6 bit time granularity */
1922 
1923 double get_tdp(model)
1924 {
1925 	unsigned long long msr;
1926 
1927 	if (do_rapl & RAPL_PKG_POWER_INFO)
1928 		if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
1929 			return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
1930 
1931 	switch (model) {
1932 	case 0x37:
1933 	case 0x4D:
1934 		return 30.0;
1935 	default:
1936 		return 135.0;
1937 	}
1938 }
1939 
1940 /*
1941  * rapl_dram_energy_units_probe()
1942  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
1943  */
1944 static double
1945 rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
1946 {
1947 	/* only called for genuine_intel, family 6 */
1948 
1949 	switch (model) {
1950 	case 0x3F:	/* HSX */
1951 	case 0x4F:	/* BDX */
1952 	case 0x56:	/* BDX-DE */
1953 		return (rapl_dram_energy_units = 15.3 / 1000000);
1954 	default:
1955 		return (rapl_energy_units);
1956 	}
1957 }
1958 
1959 
1960 /*
1961  * rapl_probe()
1962  *
1963  * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
1964  */
1965 void rapl_probe(unsigned int family, unsigned int model)
1966 {
1967 	unsigned long long msr;
1968 	unsigned int time_unit;
1969 	double tdp;
1970 
1971 	if (!genuine_intel)
1972 		return;
1973 
1974 	if (family != 6)
1975 		return;
1976 
1977 	switch (model) {
1978 	case 0x2A:
1979 	case 0x3A:
1980 	case 0x3C:	/* HSW */
1981 	case 0x45:	/* HSW */
1982 	case 0x46:	/* HSW */
1983 	case 0x3D:	/* BDW */
1984 	case 0x47:	/* BDW */
1985 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
1986 		break;
1987 	case 0x4E:	/* SKL */
1988 	case 0x5E:	/* SKL */
1989 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
1990 		break;
1991 	case 0x3F:	/* HSX */
1992 	case 0x4F:	/* BDX */
1993 	case 0x56:	/* BDX-DE */
1994 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
1995 		break;
1996 	case 0x2D:
1997 	case 0x3E:
1998 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
1999 		break;
2000 	case 0x37:	/* BYT */
2001 	case 0x4D:	/* AVN */
2002 		do_rapl = RAPL_PKG | RAPL_CORES ;
2003 		break;
2004 	default:
2005 		return;
2006 	}
2007 
2008 	/* units on package 0, verify later other packages match */
2009 	if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
2010 		return;
2011 
2012 	rapl_power_units = 1.0 / (1 << (msr & 0xF));
2013 	if (model == 0x37)
2014 		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
2015 	else
2016 		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
2017 
2018 	rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
2019 
2020 	time_unit = msr >> 16 & 0xF;
2021 	if (time_unit == 0)
2022 		time_unit = 0xA;
2023 
2024 	rapl_time_units = 1.0 / (1 << (time_unit));
2025 
2026 	tdp = get_tdp(model);
2027 
2028 	rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
2029 	if (debug)
2030 		fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
2031 
2032 	return;
2033 }
2034 
2035 void perf_limit_reasons_probe(family, model)
2036 {
2037 	if (!genuine_intel)
2038 		return;
2039 
2040 	if (family != 6)
2041 		return;
2042 
2043 	switch (model) {
2044 	case 0x3C:	/* HSW */
2045 	case 0x45:	/* HSW */
2046 	case 0x46:	/* HSW */
2047 		do_gfx_perf_limit_reasons = 1;
2048 	case 0x3F:	/* HSX */
2049 		do_core_perf_limit_reasons = 1;
2050 		do_ring_perf_limit_reasons = 1;
2051 	default:
2052 		return;
2053 	}
2054 }
2055 
2056 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2057 {
2058 	unsigned long long msr;
2059 	unsigned int dts;
2060 	int cpu;
2061 
2062 	if (!(do_dts || do_ptm))
2063 		return 0;
2064 
2065 	cpu = t->cpu_id;
2066 
2067 	/* DTS is per-core, no need to print for each thread */
2068 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
2069 		return 0;
2070 
2071 	if (cpu_migrate(cpu)) {
2072 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2073 		return -1;
2074 	}
2075 
2076 	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
2077 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
2078 			return 0;
2079 
2080 		dts = (msr >> 16) & 0x7F;
2081 		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
2082 			cpu, msr, tcc_activation_temp - dts);
2083 
2084 #ifdef	THERM_DEBUG
2085 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
2086 			return 0;
2087 
2088 		dts = (msr >> 16) & 0x7F;
2089 		dts2 = (msr >> 8) & 0x7F;
2090 		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2091 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2092 #endif
2093 	}
2094 
2095 
2096 	if (do_dts) {
2097 		unsigned int resolution;
2098 
2099 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
2100 			return 0;
2101 
2102 		dts = (msr >> 16) & 0x7F;
2103 		resolution = (msr >> 27) & 0xF;
2104 		fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
2105 			cpu, msr, tcc_activation_temp - dts, resolution);
2106 
2107 #ifdef THERM_DEBUG
2108 		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
2109 			return 0;
2110 
2111 		dts = (msr >> 16) & 0x7F;
2112 		dts2 = (msr >> 8) & 0x7F;
2113 		fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2114 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2115 #endif
2116 	}
2117 
2118 	return 0;
2119 }
2120 
2121 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
2122 {
2123 	fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
2124 		cpu, label,
2125 		((msr >> 15) & 1) ? "EN" : "DIS",
2126 		((msr >> 0) & 0x7FFF) * rapl_power_units,
2127 		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
2128 		(((msr >> 16) & 1) ? "EN" : "DIS"));
2129 
2130 	return;
2131 }
2132 
2133 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2134 {
2135 	unsigned long long msr;
2136 	int cpu;
2137 
2138 	if (!do_rapl)
2139 		return 0;
2140 
2141 	/* RAPL counters are per package, so print only for 1st thread/package */
2142 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2143 		return 0;
2144 
2145 	cpu = t->cpu_id;
2146 	if (cpu_migrate(cpu)) {
2147 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2148 		return -1;
2149 	}
2150 
2151 	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
2152 		return -1;
2153 
2154 	if (debug) {
2155 		fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
2156 			"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
2157 			rapl_power_units, rapl_energy_units, rapl_time_units);
2158 	}
2159 	if (do_rapl & RAPL_PKG_POWER_INFO) {
2160 
2161 		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
2162                 	return -5;
2163 
2164 
2165 		fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2166 			cpu, msr,
2167 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2168 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2169 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2170 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2171 
2172 	}
2173 	if (do_rapl & RAPL_PKG) {
2174 
2175 		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
2176 			return -9;
2177 
2178 		fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
2179 			cpu, msr, (msr >> 63) & 1 ? "": "UN");
2180 
2181 		print_power_limit_msr(cpu, msr, "PKG Limit #1");
2182 		fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
2183 			cpu,
2184 			((msr >> 47) & 1) ? "EN" : "DIS",
2185 			((msr >> 32) & 0x7FFF) * rapl_power_units,
2186 			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
2187 			((msr >> 48) & 1) ? "EN" : "DIS");
2188 	}
2189 
2190 	if (do_rapl & RAPL_DRAM_POWER_INFO) {
2191 		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
2192                 	return -6;
2193 
2194 		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2195 			cpu, msr,
2196 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2197 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2198 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2199 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2200 	}
2201 	if (do_rapl & RAPL_DRAM) {
2202 		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
2203 			return -9;
2204 		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
2205 				cpu, msr, (msr >> 31) & 1 ? "": "UN");
2206 
2207 		print_power_limit_msr(cpu, msr, "DRAM Limit");
2208 	}
2209 	if (do_rapl & RAPL_CORE_POLICY) {
2210 		if (debug) {
2211 			if (get_msr(cpu, MSR_PP0_POLICY, &msr))
2212 				return -7;
2213 
2214 			fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
2215 		}
2216 	}
2217 	if (do_rapl & RAPL_CORES) {
2218 		if (debug) {
2219 
2220 			if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
2221 				return -9;
2222 			fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
2223 					cpu, msr, (msr >> 31) & 1 ? "": "UN");
2224 			print_power_limit_msr(cpu, msr, "Cores Limit");
2225 		}
2226 	}
2227 	if (do_rapl & RAPL_GFX) {
2228 		if (debug) {
2229 			if (get_msr(cpu, MSR_PP1_POLICY, &msr))
2230 				return -8;
2231 
2232 			fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
2233 
2234 			if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
2235 				return -9;
2236 			fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
2237 					cpu, msr, (msr >> 31) & 1 ? "": "UN");
2238 			print_power_limit_msr(cpu, msr, "GFX Limit");
2239 		}
2240 	}
2241 	return 0;
2242 }
2243 
2244 /*
2245  * SNB adds support for additional MSRs:
2246  *
2247  * MSR_PKG_C7_RESIDENCY            0x000003fa
2248  * MSR_CORE_C7_RESIDENCY           0x000003fe
2249  * MSR_PKG_C2_RESIDENCY            0x0000060d
2250  */
2251 
2252 int has_snb_msrs(unsigned int family, unsigned int model)
2253 {
2254 	if (!genuine_intel)
2255 		return 0;
2256 
2257 	switch (model) {
2258 	case 0x2A:
2259 	case 0x2D:
2260 	case 0x3A:	/* IVB */
2261 	case 0x3E:	/* IVB Xeon */
2262 	case 0x3C:	/* HSW */
2263 	case 0x3F:	/* HSW */
2264 	case 0x45:	/* HSW */
2265 	case 0x46:	/* HSW */
2266 	case 0x3D:	/* BDW */
2267 	case 0x47:	/* BDW */
2268 	case 0x4F:	/* BDX */
2269 	case 0x56:	/* BDX-DE */
2270 	case 0x4E:	/* SKL */
2271 	case 0x5E:	/* SKL */
2272 		return 1;
2273 	}
2274 	return 0;
2275 }
2276 
2277 /*
2278  * HSW adds support for additional MSRs:
2279  *
2280  * MSR_PKG_C8_RESIDENCY            0x00000630
2281  * MSR_PKG_C9_RESIDENCY            0x00000631
2282  * MSR_PKG_C10_RESIDENCY           0x00000632
2283  */
2284 int has_hsw_msrs(unsigned int family, unsigned int model)
2285 {
2286 	if (!genuine_intel)
2287 		return 0;
2288 
2289 	switch (model) {
2290 	case 0x45:	/* HSW */
2291 	case 0x3D:	/* BDW */
2292 	case 0x4E:	/* SKL */
2293 	case 0x5E:	/* SKL */
2294 		return 1;
2295 	}
2296 	return 0;
2297 }
2298 
2299 /*
2300  * SKL adds support for additional MSRS:
2301  *
2302  * MSR_PKG_WEIGHTED_CORE_C0_RES    0x00000658
2303  * MSR_PKG_ANY_CORE_C0_RES         0x00000659
2304  * MSR_PKG_ANY_GFXE_C0_RES         0x0000065A
2305  * MSR_PKG_BOTH_CORE_GFXE_C0_RES   0x0000065B
2306  */
2307 int has_skl_msrs(unsigned int family, unsigned int model)
2308 {
2309 	if (!genuine_intel)
2310 		return 0;
2311 
2312 	switch (model) {
2313 	case 0x4E:	/* SKL */
2314 	case 0x5E:	/* SKL */
2315 		return 1;
2316 	}
2317 	return 0;
2318 }
2319 
2320 
2321 
2322 int is_slm(unsigned int family, unsigned int model)
2323 {
2324 	if (!genuine_intel)
2325 		return 0;
2326 	switch (model) {
2327 	case 0x37:	/* BYT */
2328 	case 0x4D:	/* AVN */
2329 		return 1;
2330 	}
2331 	return 0;
2332 }
2333 
2334 #define SLM_BCLK_FREQS 5
2335 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
2336 
2337 double slm_bclk(void)
2338 {
2339 	unsigned long long msr = 3;
2340 	unsigned int i;
2341 	double freq;
2342 
2343 	if (get_msr(0, MSR_FSB_FREQ, &msr))
2344 		fprintf(stderr, "SLM BCLK: unknown\n");
2345 
2346 	i = msr & 0xf;
2347 	if (i >= SLM_BCLK_FREQS) {
2348 		fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
2349 		msr = 3;
2350 	}
2351 	freq = slm_freq_table[i];
2352 
2353 	fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq);
2354 
2355 	return freq;
2356 }
2357 
2358 double discover_bclk(unsigned int family, unsigned int model)
2359 {
2360 	if (has_snb_msrs(family, model))
2361 		return 100.00;
2362 	else if (is_slm(family, model))
2363 		return slm_bclk();
2364 	else
2365 		return 133.33;
2366 }
2367 
2368 /*
2369  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
2370  * the Thermal Control Circuit (TCC) activates.
2371  * This is usually equal to tjMax.
2372  *
2373  * Older processors do not have this MSR, so there we guess,
2374  * but also allow cmdline over-ride with -T.
2375  *
2376  * Several MSR temperature values are in units of degrees-C
2377  * below this value, including the Digital Thermal Sensor (DTS),
2378  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
2379  */
2380 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2381 {
2382 	unsigned long long msr;
2383 	unsigned int target_c_local;
2384 	int cpu;
2385 
2386 	/* tcc_activation_temp is used only for dts or ptm */
2387 	if (!(do_dts || do_ptm))
2388 		return 0;
2389 
2390 	/* this is a per-package concept */
2391 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2392 		return 0;
2393 
2394 	cpu = t->cpu_id;
2395 	if (cpu_migrate(cpu)) {
2396 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2397 		return -1;
2398 	}
2399 
2400 	if (tcc_activation_temp_override != 0) {
2401 		tcc_activation_temp = tcc_activation_temp_override;
2402 		fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
2403 			cpu, tcc_activation_temp);
2404 		return 0;
2405 	}
2406 
2407 	/* Temperature Target MSR is Nehalem and newer only */
2408 	if (!do_nhm_platform_info)
2409 		goto guess;
2410 
2411 	if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
2412 		goto guess;
2413 
2414 	target_c_local = (msr >> 16) & 0xFF;
2415 
2416 	if (debug)
2417 		fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
2418 			cpu, msr, target_c_local);
2419 
2420 	if (!target_c_local)
2421 		goto guess;
2422 
2423 	tcc_activation_temp = target_c_local;
2424 
2425 	return 0;
2426 
2427 guess:
2428 	tcc_activation_temp = TJMAX_DEFAULT;
2429 	fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
2430 		cpu, tcc_activation_temp);
2431 
2432 	return 0;
2433 }
2434 void process_cpuid()
2435 {
2436 	unsigned int eax, ebx, ecx, edx, max_level;
2437 	unsigned int fms, family, model, stepping;
2438 
2439 	eax = ebx = ecx = edx = 0;
2440 
2441 	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
2442 
2443 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
2444 		genuine_intel = 1;
2445 
2446 	if (debug)
2447 		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
2448 			(char *)&ebx, (char *)&edx, (char *)&ecx);
2449 
2450 	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
2451 	family = (fms >> 8) & 0xf;
2452 	model = (fms >> 4) & 0xf;
2453 	stepping = fms & 0xf;
2454 	if (family == 6 || family == 0xf)
2455 		model += ((fms >> 16) & 0xf) << 4;
2456 
2457 	if (debug)
2458 		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
2459 			max_level, family, model, stepping, family, model, stepping);
2460 
2461 	if (!(edx & (1 << 5)))
2462 		errx(1, "CPUID: no MSR");
2463 
2464 	/*
2465 	 * check max extended function levels of CPUID.
2466 	 * This is needed to check for invariant TSC.
2467 	 * This check is valid for both Intel and AMD.
2468 	 */
2469 	ebx = ecx = edx = 0;
2470 	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
2471 
2472 	if (max_level >= 0x80000007) {
2473 
2474 		/*
2475 		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
2476 		 * this check is valid for both Intel and AMD
2477 		 */
2478 		__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
2479 		has_invariant_tsc = edx & (1 << 8);
2480 	}
2481 
2482 	/*
2483 	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
2484 	 * this check is valid for both Intel and AMD
2485 	 */
2486 
2487 	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
2488 	has_aperf = ecx & (1 << 0);
2489 	do_dts = eax & (1 << 0);
2490 	do_ptm = eax & (1 << 6);
2491 	has_epb = ecx & (1 << 3);
2492 
2493 	if (debug)
2494 		fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
2495 			has_aperf ? "" : "No ",
2496 			do_dts ? "" : "No ",
2497 			do_ptm ? "" : "No ",
2498 			has_epb ? "" : "No ");
2499 
2500 	if (max_level > 0x15) {
2501 		unsigned int eax_crystal;
2502 		unsigned int ebx_tsc;
2503 
2504 		/*
2505 		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
2506 		 */
2507 		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
2508 		__get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);
2509 
2510 		if (ebx_tsc != 0) {
2511 
2512 			if (debug && (ebx != 0))
2513 				fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
2514 					eax_crystal, ebx_tsc, crystal_hz);
2515 
2516 			if (crystal_hz == 0)
2517 				switch(model) {
2518 				case 0x4E:	/* SKL */
2519 				case 0x5E:	/* SKL */
2520 					crystal_hz = 24000000;	/* 24 MHz */
2521 					break;
2522 				default:
2523 					crystal_hz = 0;
2524 			}
2525 
2526 			if (crystal_hz) {
2527 				tsc_hz =  (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
2528 				if (debug)
2529 					fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
2530 						tsc_hz / 1000000, crystal_hz, ebx_tsc,  eax_crystal);
2531 			}
2532 		}
2533 	}
2534 
2535 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
2536 	do_snb_cstates = has_snb_msrs(family, model);
2537 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
2538 	do_pc3 = (pkg_cstate_limit >= PCL__3);
2539 	do_pc6 = (pkg_cstate_limit >= PCL__6);
2540 	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
2541 	do_c8_c9_c10 = has_hsw_msrs(family, model);
2542 	do_skl_residency = has_skl_msrs(family, model);
2543 	do_slm_cstates = is_slm(family, model);
2544 	bclk = discover_bclk(family, model);
2545 
2546 	rapl_probe(family, model);
2547 	perf_limit_reasons_probe(family, model);
2548 
2549 	if (debug)
2550 		dump_cstate_pstate_config_info();
2551 
2552 	return;
2553 }
2554 
2555 void help()
2556 {
2557 	fprintf(stderr,
2558 	"Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
2559 	"\n"
2560 	"Turbostat forks the specified COMMAND and prints statistics\n"
2561 	"when COMMAND completes.\n"
2562 	"If no COMMAND is specified, turbostat wakes every 5-seconds\n"
2563 	"to print statistics, until interrupted.\n"
2564 	"--debug	run in \"debug\" mode\n"
2565 	"--interval sec	Override default 5-second measurement interval\n"
2566 	"--help		print this help message\n"
2567 	"--counter msr	print 32-bit counter at address \"msr\"\n"
2568 	"--Counter msr	print 64-bit Counter at address \"msr\"\n"
2569 	"--msr msr	print 32-bit value at address \"msr\"\n"
2570 	"--MSR msr	print 64-bit Value at address \"msr\"\n"
2571 	"--version	print version information\n"
2572 	"\n"
2573 	"For more help, run \"man turbostat\"\n");
2574 }
2575 
2576 
2577 /*
2578  * in /dev/cpu/ return success for names that are numbers
2579  * ie. filter out ".", "..", "microcode".
2580  */
2581 int dir_filter(const struct dirent *dirp)
2582 {
2583 	if (isdigit(dirp->d_name[0]))
2584 		return 1;
2585 	else
2586 		return 0;
2587 }
2588 
2589 int open_dev_cpu_msr(int dummy1)
2590 {
2591 	return 0;
2592 }
2593 
2594 void topology_probe()
2595 {
2596 	int i;
2597 	int max_core_id = 0;
2598 	int max_package_id = 0;
2599 	int max_siblings = 0;
2600 	struct cpu_topology {
2601 		int core_id;
2602 		int physical_package_id;
2603 	} *cpus;
2604 
2605 	/* Initialize num_cpus, max_cpu_num */
2606 	topo.num_cpus = 0;
2607 	topo.max_cpu_num = 0;
2608 	for_all_proc_cpus(count_cpus);
2609 	if (!summary_only && topo.num_cpus > 1)
2610 		show_cpu = 1;
2611 
2612 	if (debug > 1)
2613 		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
2614 
2615 	cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
2616 	if (cpus == NULL)
2617 		err(1, "calloc cpus");
2618 
2619 	/*
2620 	 * Allocate and initialize cpu_present_set
2621 	 */
2622 	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
2623 	if (cpu_present_set == NULL)
2624 		err(3, "CPU_ALLOC");
2625 	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2626 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
2627 	for_all_proc_cpus(mark_cpu_present);
2628 
2629 	/*
2630 	 * Allocate and initialize cpu_affinity_set
2631 	 */
2632 	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
2633 	if (cpu_affinity_set == NULL)
2634 		err(3, "CPU_ALLOC");
2635 	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2636 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
2637 
2638 
2639 	/*
2640 	 * For online cpus
2641 	 * find max_core_id, max_package_id
2642 	 */
2643 	for (i = 0; i <= topo.max_cpu_num; ++i) {
2644 		int siblings;
2645 
2646 		if (cpu_is_not_present(i)) {
2647 			if (debug > 1)
2648 				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
2649 			continue;
2650 		}
2651 		cpus[i].core_id = get_core_id(i);
2652 		if (cpus[i].core_id > max_core_id)
2653 			max_core_id = cpus[i].core_id;
2654 
2655 		cpus[i].physical_package_id = get_physical_package_id(i);
2656 		if (cpus[i].physical_package_id > max_package_id)
2657 			max_package_id = cpus[i].physical_package_id;
2658 
2659 		siblings = get_num_ht_siblings(i);
2660 		if (siblings > max_siblings)
2661 			max_siblings = siblings;
2662 		if (debug > 1)
2663 			fprintf(stderr, "cpu %d pkg %d core %d\n",
2664 				i, cpus[i].physical_package_id, cpus[i].core_id);
2665 	}
2666 	topo.num_cores_per_pkg = max_core_id + 1;
2667 	if (debug > 1)
2668 		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
2669 			max_core_id, topo.num_cores_per_pkg);
2670 	if (debug && !summary_only && topo.num_cores_per_pkg > 1)
2671 		show_core = 1;
2672 
2673 	topo.num_packages = max_package_id + 1;
2674 	if (debug > 1)
2675 		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
2676 			max_package_id, topo.num_packages);
2677 	if (debug && !summary_only && topo.num_packages > 1)
2678 		show_pkg = 1;
2679 
2680 	topo.num_threads_per_core = max_siblings;
2681 	if (debug > 1)
2682 		fprintf(stderr, "max_siblings %d\n", max_siblings);
2683 
2684 	free(cpus);
2685 }
2686 
2687 void
2688 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
2689 {
2690 	int i;
2691 
2692 	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
2693 		topo.num_packages, sizeof(struct thread_data));
2694 	if (*t == NULL)
2695 		goto error;
2696 
2697 	for (i = 0; i < topo.num_threads_per_core *
2698 		topo.num_cores_per_pkg * topo.num_packages; i++)
2699 		(*t)[i].cpu_id = -1;
2700 
2701 	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
2702 		sizeof(struct core_data));
2703 	if (*c == NULL)
2704 		goto error;
2705 
2706 	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
2707 		(*c)[i].core_id = -1;
2708 
2709 	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
2710 	if (*p == NULL)
2711 		goto error;
2712 
2713 	for (i = 0; i < topo.num_packages; i++)
2714 		(*p)[i].package_id = i;
2715 
2716 	return;
2717 error:
2718 	err(1, "calloc counters");
2719 }
2720 /*
2721  * init_counter()
2722  *
2723  * set cpu_id, core_num, pkg_num
2724  * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
2725  *
2726  * increment topo.num_cores when 1st core in pkg seen
2727  */
2728 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
2729 	struct pkg_data *pkg_base, int thread_num, int core_num,
2730 	int pkg_num, int cpu_id)
2731 {
2732 	struct thread_data *t;
2733 	struct core_data *c;
2734 	struct pkg_data *p;
2735 
2736 	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
2737 	c = GET_CORE(core_base, core_num, pkg_num);
2738 	p = GET_PKG(pkg_base, pkg_num);
2739 
2740 	t->cpu_id = cpu_id;
2741 	if (thread_num == 0) {
2742 		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
2743 		if (cpu_is_first_core_in_package(cpu_id))
2744 			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
2745 	}
2746 
2747 	c->core_id = core_num;
2748 	p->package_id = pkg_num;
2749 }
2750 
2751 
2752 int initialize_counters(int cpu_id)
2753 {
2754 	int my_thread_id, my_core_id, my_package_id;
2755 
2756 	my_package_id = get_physical_package_id(cpu_id);
2757 	my_core_id = get_core_id(cpu_id);
2758 
2759 	if (cpu_is_first_sibling_in_core(cpu_id)) {
2760 		my_thread_id = 0;
2761 		topo.num_cores++;
2762 	} else {
2763 		my_thread_id = 1;
2764 	}
2765 
2766 	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2767 	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2768 	return 0;
2769 }
2770 
2771 void allocate_output_buffer()
2772 {
2773 	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
2774 	outp = output_buffer;
2775 	if (outp == NULL)
2776 		err(-1, "calloc output buffer");
2777 }
2778 
2779 void setup_all_buffers(void)
2780 {
2781 	topology_probe();
2782 	allocate_counters(&thread_even, &core_even, &package_even);
2783 	allocate_counters(&thread_odd, &core_odd, &package_odd);
2784 	allocate_output_buffer();
2785 	for_all_proc_cpus(initialize_counters);
2786 }
2787 
2788 void turbostat_init()
2789 {
2790 	check_dev_msr();
2791 	check_permissions();
2792 	process_cpuid();
2793 
2794 	setup_all_buffers();
2795 
2796 	if (debug)
2797 		for_all_cpus(print_epb, ODD_COUNTERS);
2798 
2799 	if (debug)
2800 		for_all_cpus(print_perf_limit, ODD_COUNTERS);
2801 
2802 	if (debug)
2803 		for_all_cpus(print_rapl, ODD_COUNTERS);
2804 
2805 	for_all_cpus(set_temperature_target, ODD_COUNTERS);
2806 
2807 	if (debug)
2808 		for_all_cpus(print_thermal, ODD_COUNTERS);
2809 }
2810 
2811 int fork_it(char **argv)
2812 {
2813 	pid_t child_pid;
2814 	int status;
2815 
2816 	status = for_all_cpus(get_counters, EVEN_COUNTERS);
2817 	if (status)
2818 		exit(status);
2819 	/* clear affinity side-effect of get_counters() */
2820 	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
2821 	gettimeofday(&tv_even, (struct timezone *)NULL);
2822 
2823 	child_pid = fork();
2824 	if (!child_pid) {
2825 		/* child */
2826 		execvp(argv[0], argv);
2827 	} else {
2828 
2829 		/* parent */
2830 		if (child_pid == -1)
2831 			err(1, "fork");
2832 
2833 		signal(SIGINT, SIG_IGN);
2834 		signal(SIGQUIT, SIG_IGN);
2835 		if (waitpid(child_pid, &status, 0) == -1)
2836 			err(status, "waitpid");
2837 	}
2838 	/*
2839 	 * n.b. fork_it() does not check for errors from for_all_cpus()
2840 	 * because re-starting is problematic when forking
2841 	 */
2842 	for_all_cpus(get_counters, ODD_COUNTERS);
2843 	gettimeofday(&tv_odd, (struct timezone *)NULL);
2844 	timersub(&tv_odd, &tv_even, &tv_delta);
2845 	for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
2846 	compute_average(EVEN_COUNTERS);
2847 	format_all_counters(EVEN_COUNTERS);
2848 	flush_stderr();
2849 
2850 	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
2851 
2852 	return status;
2853 }
2854 
2855 int get_and_dump_counters(void)
2856 {
2857 	int status;
2858 
2859 	status = for_all_cpus(get_counters, ODD_COUNTERS);
2860 	if (status)
2861 		return status;
2862 
2863 	status = for_all_cpus(dump_counters, ODD_COUNTERS);
2864 	if (status)
2865 		return status;
2866 
2867 	flush_stdout();
2868 
2869 	return status;
2870 }
2871 
2872 void print_version() {
2873 	fprintf(stderr, "turbostat version 4.5 2 Apr, 2015"
2874 		" - Len Brown <lenb@kernel.org>\n");
2875 }
2876 
2877 void cmdline(int argc, char **argv)
2878 {
2879 	int opt;
2880 	int option_index = 0;
2881 	static struct option long_options[] = {
2882 		{"Counter",	required_argument,	0, 'C'},
2883 		{"counter",	required_argument,	0, 'c'},
2884 		{"Dump",	no_argument,		0, 'D'},
2885 		{"debug",	no_argument,		0, 'd'},
2886 		{"interval",	required_argument,	0, 'i'},
2887 		{"help",	no_argument,		0, 'h'},
2888 		{"Joules",	no_argument,		0, 'J'},
2889 		{"MSR",		required_argument,	0, 'M'},
2890 		{"msr",		required_argument,	0, 'm'},
2891 		{"Package",	no_argument,		0, 'p'},
2892 		{"processor",	no_argument,		0, 'p'},
2893 		{"Summary",	no_argument,		0, 'S'},
2894 		{"TCC",		required_argument,	0, 'T'},
2895 		{"version",	no_argument,		0, 'v' },
2896 		{0,		0,			0,  0 }
2897 	};
2898 
2899 	progname = argv[0];
2900 
2901 	while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
2902 				long_options, &option_index)) != -1) {
2903 		switch (opt) {
2904 		case 'C':
2905 			sscanf(optarg, "%x", &extra_delta_offset64);
2906 			break;
2907 		case 'c':
2908 			sscanf(optarg, "%x", &extra_delta_offset32);
2909 			break;
2910 		case 'D':
2911 			dump_only++;
2912 			break;
2913 		case 'd':
2914 			debug++;
2915 			break;
2916 		case 'h':
2917 		default:
2918 			help();
2919 			exit(1);
2920 		case 'i':
2921 			interval_sec = atoi(optarg);
2922 			break;
2923 		case 'J':
2924 			rapl_joules++;
2925 			break;
2926 		case 'M':
2927 			sscanf(optarg, "%x", &extra_msr_offset64);
2928 			break;
2929 		case 'm':
2930 			sscanf(optarg, "%x", &extra_msr_offset32);
2931 			break;
2932 		case 'P':
2933 			show_pkg_only++;
2934 			break;
2935 		case 'p':
2936 			show_core_only++;
2937 			break;
2938 		case 'S':
2939 			summary_only++;
2940 			break;
2941 		case 'T':
2942 			tcc_activation_temp_override = atoi(optarg);
2943 			break;
2944 		case 'v':
2945 			print_version();
2946 			exit(0);
2947 			break;
2948 		}
2949 	}
2950 }
2951 
2952 int main(int argc, char **argv)
2953 {
2954 	cmdline(argc, argv);
2955 
2956 	if (debug)
2957 		print_version();
2958 
2959 	turbostat_init();
2960 
2961 	/* dump counters and exit */
2962 	if (dump_only)
2963 		return get_and_dump_counters();
2964 
2965 	/*
2966 	 * if any params left, it must be a command to fork
2967 	 */
2968 	if (argc - optind)
2969 		return fork_it(argv + optind);
2970 	else
2971 		turbostat_loop();
2972 
2973 	return 0;
2974 }
2975