1 /*
2  * turbostat -- show CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors.
4  *
5  * Copyright (c) 2013 Intel Corporation.
6  * Len Brown <len.brown@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 #define _GNU_SOURCE
23 #include MSRHEADER
24 #include <stdarg.h>
25 #include <stdio.h>
26 #include <err.h>
27 #include <unistd.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <sys/stat.h>
31 #include <sys/resource.h>
32 #include <fcntl.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <stdlib.h>
36 #include <getopt.h>
37 #include <dirent.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <sched.h>
41 #include <cpuid.h>
42 #include <linux/capability.h>
43 #include <errno.h>
44 
45 char *proc_stat = "/proc/stat";
46 unsigned int interval_sec = 5;
47 unsigned int debug;
48 unsigned int rapl_joules;
49 unsigned int summary_only;
50 unsigned int dump_only;
51 unsigned int skip_c0;
52 unsigned int skip_c1;
53 unsigned int do_nhm_cstates;
54 unsigned int do_snb_cstates;
55 unsigned int do_knl_cstates;
56 unsigned int do_pc2;
57 unsigned int do_pc3;
58 unsigned int do_pc6;
59 unsigned int do_pc7;
60 unsigned int do_c8_c9_c10;
61 unsigned int do_skl_residency;
62 unsigned int do_slm_cstates;
63 unsigned int use_c1_residency_msr;
64 unsigned int has_aperf;
65 unsigned int has_epb;
66 unsigned int units = 1000000;	/* MHz etc */
67 unsigned int genuine_intel;
68 unsigned int has_invariant_tsc;
69 unsigned int do_nhm_platform_info;
70 unsigned int extra_msr_offset32;
71 unsigned int extra_msr_offset64;
72 unsigned int extra_delta_offset32;
73 unsigned int extra_delta_offset64;
74 int do_smi;
75 double bclk;
76 unsigned int show_pkg;
77 unsigned int show_core;
78 unsigned int show_cpu;
79 unsigned int show_pkg_only;
80 unsigned int show_core_only;
81 char *output_buffer, *outp;
82 unsigned int do_rapl;
83 unsigned int do_dts;
84 unsigned int do_ptm;
85 unsigned int tcc_activation_temp;
86 unsigned int tcc_activation_temp_override;
87 double rapl_power_units, rapl_time_units;
88 double rapl_dram_energy_units, rapl_energy_units;
89 double rapl_joule_counter_range;
90 unsigned int do_core_perf_limit_reasons;
91 unsigned int do_gfx_perf_limit_reasons;
92 unsigned int do_ring_perf_limit_reasons;
93 unsigned int crystal_hz;
94 unsigned long long tsc_hz;
95 int base_cpu;
96 
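/*
 * do_rapl is a bit-mask of the RAPL domains supported by this CPU;
 * the MSRs implied by each bit are listed in the comments below.
 */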
97 #define RAPL_PKG		(1 << 0)
98 					/* 0x610 MSR_PKG_POWER_LIMIT */
99 					/* 0x611 MSR_PKG_ENERGY_STATUS */
100 #define RAPL_PKG_PERF_STATUS	(1 << 1)
101 					/* 0x613 MSR_PKG_PERF_STATUS */
102 #define RAPL_PKG_POWER_INFO	(1 << 2)
103 					/* 0x614 MSR_PKG_POWER_INFO */
104 
105 #define RAPL_DRAM		(1 << 3)
106 					/* 0x618 MSR_DRAM_POWER_LIMIT */
107 					/* 0x619 MSR_DRAM_ENERGY_STATUS */
108 #define RAPL_DRAM_PERF_STATUS	(1 << 4)
109 					/* 0x61b MSR_DRAM_PERF_STATUS */
110 #define RAPL_DRAM_POWER_INFO	(1 << 5)
111 					/* 0x61c MSR_DRAM_POWER_INFO */
112 
113 #define RAPL_CORES		(1 << 6)
114 					/* 0x638 MSR_PP0_POWER_LIMIT */
115 					/* 0x639 MSR_PP0_ENERGY_STATUS */
116 #define RAPL_CORE_POLICY	(1 << 7)
117 					/* 0x63a MSR_PP0_POLICY */
118 
119 #define RAPL_GFX		(1 << 8)
120 					/* 0x640 MSR_PP1_POWER_LIMIT */
121 					/* 0x641 MSR_PP1_ENERGY_STATUS */
122 					/* 0x642 MSR_PP1_POLICY */
123 #define	TJMAX_DEFAULT	100
124 
125 #define MAX(a, b) ((a) > (b) ? (a) : (b))
126 
127 int aperf_mperf_unstable;
128 int backwards_count;
129 char *progname;
130 
131 cpu_set_t *cpu_present_set, *cpu_affinity_set;
132 size_t cpu_present_setsize, cpu_affinity_setsize;
133 
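/*
 * Per-thread, per-core and per-package counters are kept in two complete
 * snapshot sets ("even" and "odd"); each interval the newer set is
 * differenced against the older one (see turbostat_loop).
 */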
134 struct thread_data {
135 	unsigned long long tsc;
136 	unsigned long long aperf;
137 	unsigned long long mperf;
138 	unsigned long long c1;
139 	unsigned long long extra_msr64;
140 	unsigned long long extra_delta64;
141 	unsigned long long extra_msr32;
142 	unsigned long long extra_delta32;
143 	unsigned int smi_count;
144 	unsigned int cpu_id;
145 	unsigned int flags;
146 #define CPU_IS_FIRST_THREAD_IN_CORE	0x2
147 #define CPU_IS_FIRST_CORE_IN_PACKAGE	0x4
148 } *thread_even, *thread_odd;
149 
150 struct core_data {
151 	unsigned long long c3;
152 	unsigned long long c6;
153 	unsigned long long c7;
154 	unsigned int core_temp_c;
155 	unsigned int core_id;
156 } *core_even, *core_odd;
157 
158 struct pkg_data {
159 	unsigned long long pc2;
160 	unsigned long long pc3;
161 	unsigned long long pc6;
162 	unsigned long long pc7;
163 	unsigned long long pc8;
164 	unsigned long long pc9;
165 	unsigned long long pc10;
166 	unsigned long long pkg_wtd_core_c0;
167 	unsigned long long pkg_any_core_c0;
168 	unsigned long long pkg_any_gfxe_c0;
169 	unsigned long long pkg_both_core_gfxe_c0;
170 	unsigned int package_id;
171 	unsigned int energy_pkg;	/* MSR_PKG_ENERGY_STATUS */
172 	unsigned int energy_dram;	/* MSR_DRAM_ENERGY_STATUS */
173 	unsigned int energy_cores;	/* MSR_PP0_ENERGY_STATUS */
174 	unsigned int energy_gfx;	/* MSR_PP1_ENERGY_STATUS */
175 	unsigned int rapl_pkg_perf_status;	/* MSR_PKG_PERF_STATUS */
176 	unsigned int rapl_dram_perf_status;	/* MSR_DRAM_PERF_STATUS */
177 	unsigned int pkg_temp_c;
178 
179 } *package_even, *package_odd;
180 
181 #define ODD_COUNTERS thread_odd, core_odd, package_odd
182 #define EVEN_COUNTERS thread_even, core_even, package_even
183 
184 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
185 	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
186 		topo.num_threads_per_core + \
187 		(core_no) * topo.num_threads_per_core + (thread_no))
188 #define GET_CORE(core_base, core_no, pkg_no) \
189 	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
190 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
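/*
 * The GET_* helpers index flat arrays laid out [package][core][thread];
 * e.g. with 2 packages, 4 cores/pkg and 2 threads/core, (pkg 1, core 2,
 * thread 1) is thread_base element 1*4*2 + 2*2 + 1 = 13.
 */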
191 
192 struct system_summary {
193 	struct thread_data threads;
194 	struct core_data cores;
195 	struct pkg_data packages;
196 } sum, average;
197 
198 
199 struct topo_params {
200 	int num_packages;
201 	int num_cpus;
202 	int num_cores;
203 	int max_cpu_num;
204 	int num_cores_per_pkg;
205 	int num_threads_per_core;
206 } topo;
207 
208 struct timeval tv_even, tv_odd, tv_delta;
209 
210 void setup_all_buffers(void);
211 
212 int cpu_is_not_present(int cpu)
213 {
214 	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
215 }
216 /*
217  * run func(thread, core, package) in topology order
218  * skip non-present cpus
219  */
220 
221 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
222 	struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
223 {
224 	int retval, pkg_no, core_no, thread_no;
225 
226 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
227 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
228 			for (thread_no = 0; thread_no <
229 				topo.num_threads_per_core; ++thread_no) {
230 				struct thread_data *t;
231 				struct core_data *c;
232 				struct pkg_data *p;
233 
234 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
235 
236 				if (cpu_is_not_present(t->cpu_id))
237 					continue;
238 
239 				c = GET_CORE(core_base, core_no, pkg_no);
240 				p = GET_PKG(pkg_base, pkg_no);
241 
242 				retval = func(t, c, p);
243 				if (retval)
244 					return retval;
245 			}
246 		}
247 	}
248 	return 0;
249 }
250 
251 int cpu_migrate(int cpu)
252 {
253 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
254 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
255 	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
256 		return -1;
257 	else
258 		return 0;
259 }
260 
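/*
 * get_msr(cpu, offset, msr)
 * Read one MSR via the msr(4) character device; "offset" is the MSR
 * address, and the msr kernel module must be loaded (see check_dev_msr).
 */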
261 int get_msr(int cpu, off_t offset, unsigned long long *msr)
262 {
263 	ssize_t retval;
264 	char pathname[32];
265 	int fd;
266 
267 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
268 	fd = open(pathname, O_RDONLY);
269 	if (fd < 0)
270 		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
271 
272 	retval = pread(fd, msr, sizeof *msr, offset);
273 	close(fd);
274 
275 	if (retval != sizeof *msr)
276 		err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
277 
278 	return 0;
279 }
280 
281 /*
282  * Example Format w/ field column widths:
283  *
284  *  Package    Core     CPU Avg_MHz Bzy_MHz TSC_MHz     SMI   %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
285  * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
286  */
287 
288 void print_header(void)
289 {
290 	if (show_pkg)
291 		outp += sprintf(outp, " Package");
292 	if (show_core)
293 		outp += sprintf(outp, "    Core");
294 	if (show_cpu)
295 		outp += sprintf(outp, "     CPU");
296 	if (has_aperf)
297 		outp += sprintf(outp, " Avg_MHz");
298 	if (has_aperf)
299 		outp += sprintf(outp, "   %%Busy");
300 	if (has_aperf)
301 		outp += sprintf(outp, " Bzy_MHz");
302 	outp += sprintf(outp, " TSC_MHz");
303 
304 	if (extra_delta_offset32)
305 		outp += sprintf(outp, "  count 0x%03X", extra_delta_offset32);
306 	if (extra_delta_offset64)
307 		outp += sprintf(outp, "  COUNT 0x%03X", extra_delta_offset64);
308 	if (extra_msr_offset32)
309 		outp += sprintf(outp, "   MSR 0x%03X", extra_msr_offset32);
310 	if (extra_msr_offset64)
311 		outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
312 
313 	if (!debug)
314 		goto done;
315 
316 	if (do_smi)
317 		outp += sprintf(outp, "     SMI");
318 
319 	if (do_nhm_cstates)
320 		outp += sprintf(outp, "  CPU%%c1");
321 	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
322 		outp += sprintf(outp, "  CPU%%c3");
323 	if (do_nhm_cstates)
324 		outp += sprintf(outp, "  CPU%%c6");
325 	if (do_snb_cstates)
326 		outp += sprintf(outp, "  CPU%%c7");
327 
328 	if (do_dts)
329 		outp += sprintf(outp, " CoreTmp");
330 	if (do_ptm)
331 		outp += sprintf(outp, "  PkgTmp");
332 
333 	if (do_skl_residency) {
334 		outp += sprintf(outp, " Totl%%C0");
335 		outp += sprintf(outp, "  Any%%C0");
336 		outp += sprintf(outp, "  GFX%%C0");
337 		outp += sprintf(outp, " CPUGFX%%");
338 	}
339 
340 	if (do_pc2)
341 		outp += sprintf(outp, " Pkg%%pc2");
342 	if (do_pc3)
343 		outp += sprintf(outp, " Pkg%%pc3");
344 	if (do_pc6)
345 		outp += sprintf(outp, " Pkg%%pc6");
346 	if (do_pc7)
347 		outp += sprintf(outp, " Pkg%%pc7");
348 	if (do_c8_c9_c10) {
349 		outp += sprintf(outp, " Pkg%%pc8");
350 		outp += sprintf(outp, " Pkg%%pc9");
351 		outp += sprintf(outp, " Pk%%pc10");
352 	}
353 
354 	if (do_rapl && !rapl_joules) {
355 		if (do_rapl & RAPL_PKG)
356 			outp += sprintf(outp, " PkgWatt");
357 		if (do_rapl & RAPL_CORES)
358 			outp += sprintf(outp, " CorWatt");
359 		if (do_rapl & RAPL_GFX)
360 			outp += sprintf(outp, " GFXWatt");
361 		if (do_rapl & RAPL_DRAM)
362 			outp += sprintf(outp, " RAMWatt");
363 		if (do_rapl & RAPL_PKG_PERF_STATUS)
364 			outp += sprintf(outp, "   PKG_%%");
365 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
366 			outp += sprintf(outp, "   RAM_%%");
367 	} else if (do_rapl && rapl_joules) {
368 		if (do_rapl & RAPL_PKG)
369 			outp += sprintf(outp, "   Pkg_J");
370 		if (do_rapl & RAPL_CORES)
371 			outp += sprintf(outp, "   Cor_J");
372 		if (do_rapl & RAPL_GFX)
373 			outp += sprintf(outp, "   GFX_J");
374 		if (do_rapl & RAPL_DRAM)
375 			outp += sprintf(outp, "   RAM_J");
376 		if (do_rapl & RAPL_PKG_PERF_STATUS)
377 			outp += sprintf(outp, "   PKG_%%");
378 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
379 			outp += sprintf(outp, "   RAM_%%");
380 		outp += sprintf(outp, "   time");
381 
382 	}
383     done:
384 	outp += sprintf(outp, "\n");
385 }
386 
387 int dump_counters(struct thread_data *t, struct core_data *c,
388 	struct pkg_data *p)
389 {
390 	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
391 
392 	if (t) {
393 		outp += sprintf(outp, "CPU: %d flags 0x%x\n",
394 			t->cpu_id, t->flags);
395 		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
396 		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
397 		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
398 		outp += sprintf(outp, "c1: %016llX\n", t->c1);
399 		outp += sprintf(outp, "msr0x%x: %08llX\n",
400 			extra_delta_offset32, t->extra_delta32);
401 		outp += sprintf(outp, "msr0x%x: %016llX\n",
402 			extra_delta_offset64, t->extra_delta64);
403 		outp += sprintf(outp, "msr0x%x: %08llX\n",
404 			extra_msr_offset32, t->extra_msr32);
405 		outp += sprintf(outp, "msr0x%x: %016llX\n",
406 			extra_msr_offset64, t->extra_msr64);
407 		if (do_smi)
408 			outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
409 	}
410 
411 	if (c) {
412 		outp += sprintf(outp, "core: %d\n", c->core_id);
413 		outp += sprintf(outp, "c3: %016llX\n", c->c3);
414 		outp += sprintf(outp, "c6: %016llX\n", c->c6);
415 		outp += sprintf(outp, "c7: %016llX\n", c->c7);
416 		outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
417 	}
418 
419 	if (p) {
420 		outp += sprintf(outp, "package: %d\n", p->package_id);
421 
422 		outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
423 		outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
424 		outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
425 		outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
426 
427 		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
428 		if (do_pc3)
429 			outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
430 		if (do_pc6)
431 			outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
432 		if (do_pc7)
433 			outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
434 		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
435 		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
436 		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
437 		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
438 		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
439 		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
440 		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
441 		outp += sprintf(outp, "Throttle PKG: %0X\n",
442 			p->rapl_pkg_perf_status);
443 		outp += sprintf(outp, "Throttle RAM: %0X\n",
444 			p->rapl_dram_perf_status);
445 		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
446 	}
447 
448 	outp += sprintf(outp, "\n");
449 
450 	return 0;
451 }
452 
453 /*
454  * column formatting convention & formats
455  */
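/*
 * Worked example for the derived columns (illustrative numbers only),
 * assuming a 4 second interval and units = 1000000 (MHz):
 *   Avg_MHz = delta_APERF / 1e6 / interval            4e9/1e6/4    = 1000
 *   %Busy   = 100 * delta_MPERF / delta_TSC           100*1e9/12e9 = 8.33
 *   Bzy_MHz = delta_TSC/1e6/interval * APERF/MPERF    (frequency while busy)
 */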
456 int format_counters(struct thread_data *t, struct core_data *c,
457 	struct pkg_data *p)
458 {
459 	double interval_float;
460 	char *fmt8;
461 
462 	 /* if showing only 1st thread in core and this isn't one, bail out */
463 	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
464 		return 0;
465 
466 	 /* if showing only 1st thread in pkg and this isn't one, bail out */
467 	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
468 		return 0;
469 
470 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
471 
472 	/* topo columns, print blanks on 1st (average) line */
473 	if (t == &average.threads) {
474 		if (show_pkg)
475 			outp += sprintf(outp, "       -");
476 		if (show_core)
477 			outp += sprintf(outp, "       -");
478 		if (show_cpu)
479 			outp += sprintf(outp, "       -");
480 	} else {
481 		if (show_pkg) {
482 			if (p)
483 				outp += sprintf(outp, "%8d", p->package_id);
484 			else
485 				outp += sprintf(outp, "       -");
486 		}
487 		if (show_core) {
488 			if (c)
489 				outp += sprintf(outp, "%8d", c->core_id);
490 			else
491 				outp += sprintf(outp, "       -");
492 		}
493 		if (show_cpu)
494 			outp += sprintf(outp, "%8d", t->cpu_id);
495 	}
496 
497 	/* Avg_MHz */
498 	if (has_aperf)
499 		outp += sprintf(outp, "%8.0f",
500 			1.0 / units * t->aperf / interval_float);
501 
502 	/* %Busy */
503 	if (has_aperf) {
504 		if (!skip_c0)
505 			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
506 		else
507 			outp += sprintf(outp, "********");
508 	}
509 
510 	/* Bzy_MHz */
511 	if (has_aperf)
512 		outp += sprintf(outp, "%8.0f",
513 			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
514 
515 	/* TSC_MHz */
516 	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
517 
518 	/* delta */
519 	if (extra_delta_offset32)
520 		outp += sprintf(outp, "  %11llu", t->extra_delta32);
521 
522 	/* DELTA */
523 	if (extra_delta_offset64)
524 		outp += sprintf(outp, "  %11llu", t->extra_delta64);
525 	/* msr */
526 	if (extra_msr_offset32)
527 		outp += sprintf(outp, "  0x%08llx", t->extra_msr32);
528 
529 	/* MSR */
530 	if (extra_msr_offset64)
531 		outp += sprintf(outp, "  0x%016llx", t->extra_msr64);
532 
533 	if (!debug)
534 		goto done;
535 
536 	/* SMI */
537 	if (do_smi)
538 		outp += sprintf(outp, "%8d", t->smi_count);
539 
540 	if (do_nhm_cstates) {
541 		if (!skip_c1)
542 			outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
543 		else
544 			outp += sprintf(outp, "********");
545 	}
546 
547 	/* print per-core data only for 1st thread in core */
548 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
549 		goto done;
550 
551 	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
552 		outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
553 	if (do_nhm_cstates)
554 		outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
555 	if (do_snb_cstates)
556 		outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
557 
558 	if (do_dts)
559 		outp += sprintf(outp, "%8d", c->core_temp_c);
560 
561 	/* print per-package data only for 1st core in package */
562 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
563 		goto done;
564 
565 	/* PkgTmp */
566 	if (do_ptm)
567 		outp += sprintf(outp, "%8d", p->pkg_temp_c);
568 
569 	/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
570 	if (do_skl_residency) {
571 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
572 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
573 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
574 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
575 	}
576 
577 	if (do_pc2)
578 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
579 	if (do_pc3)
580 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
581 	if (do_pc6)
582 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
583 	if (do_pc7)
584 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
585 	if (do_c8_c9_c10) {
586 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
587 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
588 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
589 	}
590 
591 	/*
592  	 * If measurement interval exceeds minimum RAPL Joule Counter range,
593  	 * indicate that results are suspect by printing "**" in fraction place.
594  	 */
595 	if (interval_float < rapl_joule_counter_range)
596 		fmt8 = "%8.2f";
597 	else
598 		fmt8 = " %6.0f**";
599 
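	/* raw energy counts * rapl_energy_units / seconds = average Watts */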
600 	if (do_rapl && !rapl_joules) {
601 		if (do_rapl & RAPL_PKG)
602 			outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
603 		if (do_rapl & RAPL_CORES)
604 			outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
605 		if (do_rapl & RAPL_GFX)
606 			outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
607 		if (do_rapl & RAPL_DRAM)
608 			outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
609 		if (do_rapl & RAPL_PKG_PERF_STATUS)
610 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
611 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
612 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
613 	} else if (do_rapl && rapl_joules) {
614 		if (do_rapl & RAPL_PKG)
615 			outp += sprintf(outp, fmt8,
616 					p->energy_pkg * rapl_energy_units);
617 		if (do_rapl & RAPL_CORES)
618 			outp += sprintf(outp, fmt8,
619 					p->energy_cores * rapl_energy_units);
620 		if (do_rapl & RAPL_GFX)
621 			outp += sprintf(outp, fmt8,
622 					p->energy_gfx * rapl_energy_units);
623 		if (do_rapl & RAPL_DRAM)
624 			outp += sprintf(outp, fmt8,
625 					p->energy_dram * rapl_dram_energy_units);
626 		if (do_rapl & RAPL_PKG_PERF_STATUS)
627 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
628 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
629 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
630 
631 		outp += sprintf(outp, fmt8, interval_float);
632 	}
633 done:
634 	outp += sprintf(outp, "\n");
635 
636 	return 0;
637 }
638 
639 void flush_stdout()
640 {
641 	fputs(output_buffer, stdout);
642 	fflush(stdout);
643 	outp = output_buffer;
644 }
645 void flush_stderr()
646 {
647 	fputs(output_buffer, stderr);
648 	outp = output_buffer;
649 }
650 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
651 {
652 	static int printed;
653 
654 	if (!printed || !summary_only)
655 		print_header();
656 
657 	if (topo.num_cpus > 1)
658 		format_counters(&average.threads, &average.cores,
659 			&average.packages);
660 
661 	printed = 1;
662 
663 	if (summary_only)
664 		return;
665 
666 	for_all_cpus(format_counters, t, c, p);
667 }
668 
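/*
 * The RAPL energy status counters are only 32 bits wide and wrap
 * frequently, so deltas are taken modulo 2^32: for example,
 * old = 0xFFFFFFF0, new = 0x10 yields a delta of 0x20.
 */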
669 #define DELTA_WRAP32(new, old)			\
670 	if (new > old) {			\
671 		old = new - old;		\
672 	} else {				\
673 		old = 0x100000000 + new - old;	\
674 	}
675 
676 void
677 delta_package(struct pkg_data *new, struct pkg_data *old)
678 {
679 
680 	if (do_skl_residency) {
681 		old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
682 		old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
683 		old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
684 		old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
685 	}
686 	old->pc2 = new->pc2 - old->pc2;
687 	if (do_pc3)
688 		old->pc3 = new->pc3 - old->pc3;
689 	if (do_pc6)
690 		old->pc6 = new->pc6 - old->pc6;
691 	if (do_pc7)
692 		old->pc7 = new->pc7 - old->pc7;
693 	old->pc8 = new->pc8 - old->pc8;
694 	old->pc9 = new->pc9 - old->pc9;
695 	old->pc10 = new->pc10 - old->pc10;
696 	old->pkg_temp_c = new->pkg_temp_c;
697 
698 	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
699 	DELTA_WRAP32(new->energy_cores, old->energy_cores);
700 	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
701 	DELTA_WRAP32(new->energy_dram, old->energy_dram);
702 	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
703 	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
704 }
705 
706 void
707 delta_core(struct core_data *new, struct core_data *old)
708 {
709 	old->c3 = new->c3 - old->c3;
710 	old->c6 = new->c6 - old->c6;
711 	old->c7 = new->c7 - old->c7;
712 	old->core_temp_c = new->core_temp_c;
713 }
714 
715 /*
716  * old = new - old
717  */
718 void
719 delta_thread(struct thread_data *new, struct thread_data *old,
720 	struct core_data *core_delta)
721 {
722 	old->tsc = new->tsc - old->tsc;
723 
724 	/* check for TSC < 1 Mcycles over interval */
725 	if (old->tsc < (1000 * 1000))
726 		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
727 		     "You can disable all c-states by booting with \"idle=poll\"\n"
728 		     "or just the deep ones with \"processor.max_cstate=1\"");
729 
730 	old->c1 = new->c1 - old->c1;
731 
732 	if (has_aperf) {
733 		if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
734 			old->aperf = new->aperf - old->aperf;
735 			old->mperf = new->mperf - old->mperf;
736 		} else {
737 
738 			if (!aperf_mperf_unstable) {
739 				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
740 				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
741 				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
742 
743 				aperf_mperf_unstable = 1;
744 			}
745 			/*
746 			 * mperf delta is likely a huge "positive" number
747 			 * can not use it for calculating c0 time
748 			 */
749 			skip_c0 = 1;
750 			skip_c1 = 1;
751 		}
752 	}
753 
754 
755 	if (use_c1_residency_msr) {
756 		/*
757 		 * Some models have a dedicated C1 residency MSR,
758 		 * which should be more accurate than the derivation below.
759 		 */
760 	} else {
761 		/*
762 		 * As counter collection is not atomic,
763 		 * it is possible for mperf's non-halted cycles + idle states
764 		 * to exceed TSC's all cycles: show c1 = 0% in that case.
765 		 */
766 		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
767 			old->c1 = 0;
768 		else {
769 			/* normal case, derive c1 */
770 			old->c1 = old->tsc - old->mperf - core_delta->c3
771 				- core_delta->c6 - core_delta->c7;
772 		}
773 	}
774 
775 	if (old->mperf == 0) {
776 		if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
777 		old->mperf = 1;	/* divide by 0 protection */
778 	}
779 
780 	old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
781 	old->extra_delta32 &= 0xFFFFFFFF;
782 
783 	old->extra_delta64 = new->extra_delta64 - old->extra_delta64;
784 
785 	/*
786 	 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
787 	 */
788 	old->extra_msr32 = new->extra_msr32;
789 	old->extra_msr64 = new->extra_msr64;
790 
791 	if (do_smi)
792 		old->smi_count = new->smi_count - old->smi_count;
793 }
794 
795 int delta_cpu(struct thread_data *t, struct core_data *c,
796 	struct pkg_data *p, struct thread_data *t2,
797 	struct core_data *c2, struct pkg_data *p2)
798 {
799 	/* calculate core delta only for 1st thread in core */
800 	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
801 		delta_core(c, c2);
802 
803 	/* always calculate thread delta */
804 	delta_thread(t, t2, c2);	/* c2 is core delta */
805 
806 	/* calculate package delta only for 1st core in package */
807 	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
808 		delta_package(p, p2);
809 
810 	return 0;
811 }
812 
813 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
814 {
815 	t->tsc = 0;
816 	t->aperf = 0;
817 	t->mperf = 0;
818 	t->c1 = 0;
819 
820 	t->smi_count = 0;
821 	t->extra_delta32 = 0;
822 	t->extra_delta64 = 0;
823 
824 	/* tells format_counters to dump all fields from this set */
825 	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
826 
827 	c->c3 = 0;
828 	c->c6 = 0;
829 	c->c7 = 0;
830 	c->core_temp_c = 0;
831 
832 	p->pkg_wtd_core_c0 = 0;
833 	p->pkg_any_core_c0 = 0;
834 	p->pkg_any_gfxe_c0 = 0;
835 	p->pkg_both_core_gfxe_c0 = 0;
836 
837 	p->pc2 = 0;
838 	if (do_pc3)
839 		p->pc3 = 0;
840 	if (do_pc6)
841 		p->pc6 = 0;
842 	if (do_pc7)
843 		p->pc7 = 0;
844 	p->pc8 = 0;
845 	p->pc9 = 0;
846 	p->pc10 = 0;
847 
848 	p->energy_pkg = 0;
849 	p->energy_dram = 0;
850 	p->energy_cores = 0;
851 	p->energy_gfx = 0;
852 	p->rapl_pkg_perf_status = 0;
853 	p->rapl_dram_perf_status = 0;
854 	p->pkg_temp_c = 0;
855 }
856 int sum_counters(struct thread_data *t, struct core_data *c,
857 	struct pkg_data *p)
858 {
859 	average.threads.tsc += t->tsc;
860 	average.threads.aperf += t->aperf;
861 	average.threads.mperf += t->mperf;
862 	average.threads.c1 += t->c1;
863 
864 	average.threads.extra_delta32 += t->extra_delta32;
865 	average.threads.extra_delta64 += t->extra_delta64;
866 
867 	/* sum per-core values only for 1st thread in core */
868 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
869 		return 0;
870 
871 	average.cores.c3 += c->c3;
872 	average.cores.c6 += c->c6;
873 	average.cores.c7 += c->c7;
874 
875 	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
876 
877 	/* sum per-pkg values only for 1st core in pkg */
878 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
879 		return 0;
880 
881 	if (do_skl_residency) {
882 		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
883 		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
884 		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
885 		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
886 	}
887 
888 	average.packages.pc2 += p->pc2;
889 	if (do_pc3)
890 		average.packages.pc3 += p->pc3;
891 	if (do_pc6)
892 		average.packages.pc6 += p->pc6;
893 	if (do_pc7)
894 		average.packages.pc7 += p->pc7;
895 	average.packages.pc8 += p->pc8;
896 	average.packages.pc9 += p->pc9;
897 	average.packages.pc10 += p->pc10;
898 
899 	average.packages.energy_pkg += p->energy_pkg;
900 	average.packages.energy_dram += p->energy_dram;
901 	average.packages.energy_cores += p->energy_cores;
902 	average.packages.energy_gfx += p->energy_gfx;
903 
904 	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
905 
906 	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
907 	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
908 	return 0;
909 }
910 /*
911  * sum the counters for all cpus in the system
912  * compute the weighted average
913  */
914 void compute_average(struct thread_data *t, struct core_data *c,
915 	struct pkg_data *p)
916 {
917 	clear_counters(&average.threads, &average.cores, &average.packages);
918 
919 	for_all_cpus(sum_counters, t, c, p);
920 
921 	average.threads.tsc /= topo.num_cpus;
922 	average.threads.aperf /= topo.num_cpus;
923 	average.threads.mperf /= topo.num_cpus;
924 	average.threads.c1 /= topo.num_cpus;
925 
926 	average.threads.extra_delta32 /= topo.num_cpus;
927 	average.threads.extra_delta32 &= 0xFFFFFFFF;
928 
929 	average.threads.extra_delta64 /= topo.num_cpus;
930 
931 	average.cores.c3 /= topo.num_cores;
932 	average.cores.c6 /= topo.num_cores;
933 	average.cores.c7 /= topo.num_cores;
934 
935 	if (do_skl_residency) {
936 		average.packages.pkg_wtd_core_c0 /= topo.num_packages;
937 		average.packages.pkg_any_core_c0 /= topo.num_packages;
938 		average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
939 		average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
940 	}
941 
942 	average.packages.pc2 /= topo.num_packages;
943 	if (do_pc3)
944 		average.packages.pc3 /= topo.num_packages;
945 	if (do_pc6)
946 		average.packages.pc6 /= topo.num_packages;
947 	if (do_pc7)
948 		average.packages.pc7 /= topo.num_packages;
949 
950 	average.packages.pc8 /= topo.num_packages;
951 	average.packages.pc9 /= topo.num_packages;
952 	average.packages.pc10 /= topo.num_packages;
953 }
954 
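/*
 * rdtsc()
 * Read the local TSC directly; callers (see get_counters) have already
 * bound themselves to the CPU of interest via cpu_migrate().
 */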
955 static unsigned long long rdtsc(void)
956 {
957 	unsigned int low, high;
958 
959 	asm volatile("rdtsc" : "=a" (low), "=d" (high));
960 
961 	return low | ((unsigned long long)high) << 32;
962 }
963 
964 
965 /*
966  * get_counters(...)
967  * migrate to cpu
968  * acquire and record local counters for that cpu
969  */
970 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
971 {
972 	int cpu = t->cpu_id;
973 	unsigned long long msr;
974 
975 	if (cpu_migrate(cpu)) {
976 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
977 		return -1;
978 	}
979 
980 	t->tsc = rdtsc();	/* we are running on local CPU of interest */
981 
982 	if (has_aperf) {
983 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
984 			return -3;
985 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
986 			return -4;
987 	}
988 
989 	if (do_smi) {
990 		if (get_msr(cpu, MSR_SMI_COUNT, &msr))
991 			return -5;
992 		t->smi_count = msr & 0xFFFFFFFF;
993 	}
994 	if (extra_delta_offset32) {
995 		if (get_msr(cpu, extra_delta_offset32, &msr))
996 			return -5;
997 		t->extra_delta32 = msr & 0xFFFFFFFF;
998 	}
999 
1000 	if (extra_delta_offset64)
1001 		if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
1002 			return -5;
1003 
1004 	if (extra_msr_offset32) {
1005 		if (get_msr(cpu, extra_msr_offset32, &msr))
1006 			return -5;
1007 		t->extra_msr32 = msr & 0xFFFFFFFF;
1008 	}
1009 
1010 	if (extra_msr_offset64)
1011 		if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
1012 			return -5;
1013 
1014 	if (use_c1_residency_msr) {
1015 		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1016 			return -6;
1017 	}
1018 
1019 	/* collect core counters only for 1st thread in core */
1020 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1021 		return 0;
1022 
1023 	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
1024 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1025 			return -6;
1026 	}
1027 
1028 	if (do_nhm_cstates && !do_knl_cstates) {
1029 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1030 			return -7;
1031 	} else if (do_knl_cstates) {
1032 		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1033 			return -7;
1034 	}
1035 
1036 	if (do_snb_cstates)
1037 		if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1038 			return -8;
1039 
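	/*
	 * The Digital Thermal Sensor readout in THERM_STATUS bits 22:16 is
	 * degrees below the TCC activation temperature, so convert it back
	 * to degrees Celsius here.
	 */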
1040 	if (do_dts) {
1041 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1042 			return -9;
1043 		c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1044 	}
1045 
1046 
1047 	/* collect package counters only for 1st core in package */
1048 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1049 		return 0;
1050 
1051 	if (do_skl_residency) {
1052 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1053 			return -10;
1054 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1055 			return -11;
1056 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1057 			return -12;
1058 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1059 			return -13;
1060 	}
1061 	if (do_pc3)
1062 		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1063 			return -9;
1064 	if (do_pc6)
1065 		if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1066 			return -10;
1067 	if (do_pc2)
1068 		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1069 			return -11;
1070 	if (do_pc7)
1071 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1072 			return -12;
1073 	if (do_c8_c9_c10) {
1074 		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1075 			return -13;
1076 		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1077 			return -13;
1078 		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1079 			return -13;
1080 	}
1081 	if (do_rapl & RAPL_PKG) {
1082 		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1083 			return -13;
1084 		p->energy_pkg = msr & 0xFFFFFFFF;
1085 	}
1086 	if (do_rapl & RAPL_CORES) {
1087 		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1088 			return -14;
1089 		p->energy_cores = msr & 0xFFFFFFFF;
1090 	}
1091 	if (do_rapl & RAPL_DRAM) {
1092 		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1093 			return -15;
1094 		p->energy_dram = msr & 0xFFFFFFFF;
1095 	}
1096 	if (do_rapl & RAPL_GFX) {
1097 		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1098 			return -16;
1099 		p->energy_gfx = msr & 0xFFFFFFFF;
1100 	}
1101 	if (do_rapl & RAPL_PKG_PERF_STATUS) {
1102 		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1103 			return -16;
1104 		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1105 	}
1106 	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1107 		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1108 			return -16;
1109 		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1110 	}
1111 	if (do_ptm) {
1112 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1113 			return -17;
1114 		p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1115 	}
1116 	return 0;
1117 }
1118 
1119 /*
1120  * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1121  * If you change the values, note they are used both in comparisons
1122  * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1123  */
1124 
1125 #define PCLUKN 0 /* Unknown */
1126 #define PCLRSV 1 /* Reserved */
1127 #define PCL__0 2 /* PC0 */
1128 #define PCL__1 3 /* PC1 */
1129 #define PCL__2 4 /* PC2 */
1130 #define PCL__3 5 /* PC3 */
1131 #define PCL__4 6 /* PC4 */
1132 #define PCL__6 7 /* PC6 */
1133 #define PCL_6N 8 /* PC6 No Retention */
1134 #define PCL_6R 9 /* PC6 Retention */
1135 #define PCL__7 10 /* PC7 */
1136 #define PCL_7S 11 /* PC7 Shrink */
1137 #define PCL__8 12 /* PC8 */
1138 #define PCL__9 13 /* PC9 */
1139 #define PCLUNL 14 /* Unlimited */
1140 
1141 int pkg_cstate_limit = PCLUKN;
1142 char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
1143 	"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1144 
1145 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1146 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1147 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1148 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1149 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1150 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
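/*
 * Each table above maps the 4-bit package C-state limit field of
 * MSR_PKG_CST_CONFIG_CONTROL to a PCL_* index for one CPU generation;
 * probe_nhm_msrs() selects the table and indexes it with (msr & 0xF).
 */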
1151 
1152 static void
1153 dump_nhm_platform_info(void)
1154 {
1155 	unsigned long long msr;
1156 	unsigned int ratio;
1157 
1158 	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
1159 
1160 	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1161 
1162 	ratio = (msr >> 40) & 0xFF;
1163 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
1164 		ratio, bclk, ratio * bclk);
1165 
1166 	ratio = (msr >> 8) & 0xFF;
1167 	fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
1168 		ratio, bclk, ratio * bclk);
1169 
1170 	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1171 	fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1172 		base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
1173 
1174 	return;
1175 }
1176 
1177 static void
1178 dump_hsw_turbo_ratio_limits(void)
1179 {
1180 	unsigned long long msr;
1181 	unsigned int ratio;
1182 
1183 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1184 
1185 	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
1186 
1187 	ratio = (msr >> 8) & 0xFF;
1188 	if (ratio)
1189 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
1190 			ratio, bclk, ratio * bclk);
1191 
1192 	ratio = (msr >> 0) & 0xFF;
1193 	if (ratio)
1194 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
1195 			ratio, bclk, ratio * bclk);
1196 	return;
1197 }
1198 
1199 static void
1200 dump_ivt_turbo_ratio_limits(void)
1201 {
1202 	unsigned long long msr;
1203 	unsigned int ratio;
1204 
1205 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1206 
1207 	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
1208 
1209 	ratio = (msr >> 56) & 0xFF;
1210 	if (ratio)
1211 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
1212 			ratio, bclk, ratio * bclk);
1213 
1214 	ratio = (msr >> 48) & 0xFF;
1215 	if (ratio)
1216 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
1217 			ratio, bclk, ratio * bclk);
1218 
1219 	ratio = (msr >> 40) & 0xFF;
1220 	if (ratio)
1221 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
1222 			ratio, bclk, ratio * bclk);
1223 
1224 	ratio = (msr >> 32) & 0xFF;
1225 	if (ratio)
1226 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
1227 			ratio, bclk, ratio * bclk);
1228 
1229 	ratio = (msr >> 24) & 0xFF;
1230 	if (ratio)
1231 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
1232 			ratio, bclk, ratio * bclk);
1233 
1234 	ratio = (msr >> 16) & 0xFF;
1235 	if (ratio)
1236 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
1237 			ratio, bclk, ratio * bclk);
1238 
1239 	ratio = (msr >> 8) & 0xFF;
1240 	if (ratio)
1241 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
1242 			ratio, bclk, ratio * bclk);
1243 
1244 	ratio = (msr >> 0) & 0xFF;
1245 	if (ratio)
1246 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
1247 			ratio, bclk, ratio * bclk);
1248 	return;
1249 }
1250 
1251 static void
1252 dump_nhm_turbo_ratio_limits(void)
1253 {
1254 	unsigned long long msr;
1255 	unsigned int ratio;
1256 
1257 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1258 
1259 	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
1260 
1261 	ratio = (msr >> 56) & 0xFF;
1262 	if (ratio)
1263 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
1264 			ratio, bclk, ratio * bclk);
1265 
1266 	ratio = (msr >> 48) & 0xFF;
1267 	if (ratio)
1268 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
1269 			ratio, bclk, ratio * bclk);
1270 
1271 	ratio = (msr >> 40) & 0xFF;
1272 	if (ratio)
1273 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
1274 			ratio, bclk, ratio * bclk);
1275 
1276 	ratio = (msr >> 32) & 0xFF;
1277 	if (ratio)
1278 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
1279 			ratio, bclk, ratio * bclk);
1280 
1281 	ratio = (msr >> 24) & 0xFF;
1282 	if (ratio)
1283 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
1284 			ratio, bclk, ratio * bclk);
1285 
1286 	ratio = (msr >> 16) & 0xFF;
1287 	if (ratio)
1288 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
1289 			ratio, bclk, ratio * bclk);
1290 
1291 	ratio = (msr >> 8) & 0xFF;
1292 	if (ratio)
1293 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
1294 			ratio, bclk, ratio * bclk);
1295 
1296 	ratio = (msr >> 0) & 0xFF;
1297 	if (ratio)
1298 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1299 			ratio, bclk, ratio * bclk);
1300 	return;
1301 }
1302 
1303 static void
1304 dump_knl_turbo_ratio_limits(void)
1305 {
1306 	int cores;
1307 	unsigned int ratio;
1308 	unsigned long long msr;
1309 	int delta_cores;
1310 	int delta_ratio;
1311 	int i;
1312 
1313 	get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
1314 
1315 	fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
1316 		base_cpu, msr);
1317 
1318 	/**
1319 	 * Turbo encoding in KNL is as follows:
1320 	 * [7:0] -- Base value of number of active cores of bucket 1.
1321 	 * [15:8] -- Base value of freq ratio of bucket 1.
1322 	 * [20:16] -- +ve delta of number of active cores of bucket 2.
1323 	 * i.e. active cores of bucket 2 =
1324 	 * active cores of bucket 1 + delta
1325 	 * [23:21] -- Negative delta of freq ratio of bucket 2.
1326 	 * i.e. freq ratio of bucket 2 =
1327 	 * freq ratio of bucket 1 - delta
1328 	 * [28:24]-- +ve delta of number of active cores of bucket 3.
1329 	 * [31:29]-- -ve delta of freq ratio of bucket 3.
1330 	 * [36:32]-- +ve delta of number of active cores of bucket 4.
1331 	 * [39:37]-- -ve delta of freq ratio of bucket 4.
1332 	 * [44:40]-- +ve delta of number of active cores of bucket 5.
1333 	 * [47:45]-- -ve delta of freq ratio of bucket 5.
1334 	 * [52:48]-- +ve delta of number of active cores of bucket 6.
1335 	 * [55:53]-- -ve delta of freq ratio of bucket 6.
1336 	 * [60:56]-- +ve delta of number of active cores of bucket 7.
1337 	 * [63:61]-- -ve delta of freq ratio of bucket 7.
1338 	 */
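	/*
	 * Illustrative (hypothetical) decode: if bucket 1 is 2 cores at
	 * ratio 30, and bucket 2 encodes +2 cores / -1 ratio, then bucket 2
	 * is 4 active cores at ratio 29, and so on for later buckets.
	 */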
1339 	cores = msr & 0xFF;
1340 	ratio = (msr >> 8) & 0xFF;
1341 	if (ratio > 0)
1342 		fprintf(stderr,
1343 			"%d * %.0f = %.0f MHz max turbo %d active cores\n",
1344 			ratio, bclk, ratio * bclk, cores);
1345 
1346 	for (i = 16; i < 64; i = i + 8) {
1347 		delta_cores = (msr >> i) & 0x1F;
1348 		delta_ratio = (msr >> (i + 5)) & 0x7;
1349 		if (!delta_cores || !delta_ratio)
1350 			return;
1351 		cores = cores + delta_cores;
1352 		ratio = ratio - delta_ratio;
1353 
1354 		/** -ve ratios will make successive ratio calculations
1355 		 * negative. Hence return instead of carrying on.
1356 		 */
1357 		if (ratio > 0)
1358 			fprintf(stderr,
1359 				"%d * %.0f = %.0f MHz max turbo %d active cores\n",
1360 				ratio, bclk, ratio * bclk, cores);
1361 	}
1362 }
1363 
1364 static void
1365 dump_nhm_cst_cfg(void)
1366 {
1367 	unsigned long long msr;
1368 
1369 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1370 
1371 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
1372 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
1373 
1374 	fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
1375 
1376 	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
1377 		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
1378 		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
1379 		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
1380 		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
1381 		(msr & (1 << 15)) ? "" : "UN",
1382 		(unsigned int)msr & 7,
1383 		pkg_cstate_limit_strings[pkg_cstate_limit]);
1384 	return;
1385 }
1386 
1387 void free_all_buffers(void)
1388 {
1389 	CPU_FREE(cpu_present_set);
1390 	cpu_present_set = NULL;
1391 	cpu_present_setsize = 0;
1392 
1393 	CPU_FREE(cpu_affinity_set);
1394 	cpu_affinity_set = NULL;
1395 	cpu_affinity_setsize = 0;
1396 
1397 	free(thread_even);
1398 	free(core_even);
1399 	free(package_even);
1400 
1401 	thread_even = NULL;
1402 	core_even = NULL;
1403 	package_even = NULL;
1404 
1405 	free(thread_odd);
1406 	free(core_odd);
1407 	free(package_odd);
1408 
1409 	thread_odd = NULL;
1410 	core_odd = NULL;
1411 	package_odd = NULL;
1412 
1413 	free(output_buffer);
1414 	output_buffer = NULL;
1415 	outp = NULL;
1416 }
1417 
1418 /*
1419  * Open a file, and exit on failure
1420  */
1421 FILE *fopen_or_die(const char *path, const char *mode)
1422 {
1423 	FILE *filep = fopen(path, mode);
1424 	if (!filep)
1425 		err(1, "%s: open failed", path);
1426 	return filep;
1427 }
1428 
1429 /*
1430  * Parse a file containing a single int.
1431  */
1432 int parse_int_file(const char *fmt, ...)
1433 {
1434 	va_list args;
1435 	char path[PATH_MAX];
1436 	FILE *filep;
1437 	int value;
1438 
1439 	va_start(args, fmt);
1440 	vsnprintf(path, sizeof(path), fmt, args);
1441 	va_end(args);
1442 	filep = fopen_or_die(path, "r");
1443 	if (fscanf(filep, "%d", &value) != 1)
1444 		err(1, "%s: failed to parse number from file", path);
1445 	fclose(filep);
1446 	return value;
1447 }
1448 
1449 /*
1450  * get_cpu_position_in_core(cpu)
1451  * return the position of the CPU among its HT siblings in the core
1452  * return -1 if the CPU is not found in the sibling list
1453  */
1454 int get_cpu_position_in_core(int cpu)
1455 {
1456 	char path[64];
1457 	FILE *filep;
1458 	int this_cpu;
1459 	char character;
1460 	int i;
1461 
1462 	sprintf(path,
1463 		"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
1464 		cpu);
1465 	filep = fopen(path, "r");
1466 	if (filep == NULL) {
1467 		perror(path);
1468 		exit(1);
1469 	}
1470 
1471 	for (i = 0; i < topo.num_threads_per_core; i++) {
1472 		fscanf(filep, "%d", &this_cpu);
1473 		if (this_cpu == cpu) {
1474 			fclose(filep);
1475 			return i;
1476 		}
1477 
1478 		/* Account for no separator after last thread*/
1479 		if (i != (topo.num_threads_per_core - 1))
1480 			fscanf(filep, "%c", &character);
1481 	}
1482 
1483 	fclose(filep);
1484 	return -1;
1485 }
1486 
1487 /*
1488  * cpu_is_first_core_in_package(cpu)
1489  * return 1 if given CPU is 1st core in package
1490  */
1491 int cpu_is_first_core_in_package(int cpu)
1492 {
1493 	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
1494 }
1495 
1496 int get_physical_package_id(int cpu)
1497 {
1498 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
1499 }
1500 
1501 int get_core_id(int cpu)
1502 {
1503 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
1504 }
1505 
1506 int get_num_ht_siblings(int cpu)
1507 {
1508 	char path[80];
1509 	FILE *filep;
1510 	int sib1;
1511 	int matches = 0;
1512 	char character;
1513 	char str[100];
1514 	char *ch;
1515 
1516 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1517 	filep = fopen_or_die(path, "r");
1518 
1519 	/*
1520 	 * file format:
1521 	 * A ',' separated or '-' separated set of numbers
1522 	 * (eg 1-2 or 1,3,4,5)
1523 	 */
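	/*
	 * Count the separators that follow the first entry and add one,
	 * e.g. "0,4" or "0-1" both yield 2 siblings.
	 */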
1524 	fscanf(filep, "%d%c\n", &sib1, &character);
1525 	fseek(filep, 0, SEEK_SET);
1526 	fgets(str, 100, filep);
1527 	ch = strchr(str, character);
1528 	while (ch != NULL) {
1529 		matches++;
1530 		ch = strchr(ch+1, character);
1531 	}
1532 
1533 	fclose(filep);
1534 	return matches+1;
1535 }
1536 
1537 /*
1538  * run func(thread, core, package) in topology order
1539  * skip non-present cpus
1540  */
1541 
1542 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
1543 	struct pkg_data *, struct thread_data *, struct core_data *,
1544 	struct pkg_data *), struct thread_data *thread_base,
1545 	struct core_data *core_base, struct pkg_data *pkg_base,
1546 	struct thread_data *thread_base2, struct core_data *core_base2,
1547 	struct pkg_data *pkg_base2)
1548 {
1549 	int retval, pkg_no, core_no, thread_no;
1550 
1551 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
1552 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
1553 			for (thread_no = 0; thread_no <
1554 				topo.num_threads_per_core; ++thread_no) {
1555 				struct thread_data *t, *t2;
1556 				struct core_data *c, *c2;
1557 				struct pkg_data *p, *p2;
1558 
1559 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
1560 
1561 				if (cpu_is_not_present(t->cpu_id))
1562 					continue;
1563 
1564 				t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
1565 
1566 				c = GET_CORE(core_base, core_no, pkg_no);
1567 				c2 = GET_CORE(core_base2, core_no, pkg_no);
1568 
1569 				p = GET_PKG(pkg_base, pkg_no);
1570 				p2 = GET_PKG(pkg_base2, pkg_no);
1571 
1572 				retval = func(t, c, p, t2, c2, p2);
1573 				if (retval)
1574 					return retval;
1575 			}
1576 		}
1577 	}
1578 	return 0;
1579 }
1580 
1581 /*
1582  * run func(cpu) on every cpu in /proc/stat
1583  * return max_cpu number
1584  */
1585 int for_all_proc_cpus(int (func)(int))
1586 {
1587 	FILE *fp;
1588 	int cpu_num;
1589 	int retval;
1590 
1591 	fp = fopen_or_die(proc_stat, "r");
1592 
1593 	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1594 	if (retval != 0)
1595 		err(1, "%s: failed to parse format", proc_stat);
1596 
1597 	while (1) {
1598 		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1599 		if (retval != 1)
1600 			break;
1601 
1602 		retval = func(cpu_num);
1603 		if (retval) {
1604 			fclose(fp);
1605 			return(retval);
1606 		}
1607 	}
1608 	fclose(fp);
1609 	return 0;
1610 }
1611 
1612 void re_initialize(void)
1613 {
1614 	free_all_buffers();
1615 	setup_all_buffers();
1616 	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
1617 }
1618 
1619 
1620 /*
1621  * count_cpus()
1622  * remember the last one seen, it will be the max
1623  */
1624 int count_cpus(int cpu)
1625 {
1626 	if (topo.max_cpu_num < cpu)
1627 		topo.max_cpu_num = cpu;
1628 
1629 	topo.num_cpus += 1;
1630 	return 0;
1631 }
1632 int mark_cpu_present(int cpu)
1633 {
1634 	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1635 	return 0;
1636 }
1637 
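/*
 * turbostat_loop()
 * Ping-pong between the EVEN and ODD counter sets: sleep for the
 * interval, snapshot into one set, compute "new - old" deltas against
 * the other set, then print and swap roles for the next interval.
 */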
1638 void turbostat_loop()
1639 {
1640 	int retval;
1641 	int restarted = 0;
1642 
1643 restart:
1644 	restarted++;
1645 
1646 	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1647 	if (retval < -1) {
1648 		exit(retval);
1649 	} else if (retval == -1) {
1650 		if (restarted > 1) {
1651 			exit(retval);
1652 		}
1653 		re_initialize();
1654 		goto restart;
1655 	}
1656 	restarted = 0;
1657 	gettimeofday(&tv_even, (struct timezone *)NULL);
1658 
1659 	while (1) {
1660 		if (for_all_proc_cpus(cpu_is_not_present)) {
1661 			re_initialize();
1662 			goto restart;
1663 		}
1664 		sleep(interval_sec);
1665 		retval = for_all_cpus(get_counters, ODD_COUNTERS);
1666 		if (retval < -1) {
1667 			exit(retval);
1668 		} else if (retval == -1) {
1669 			re_initialize();
1670 			goto restart;
1671 		}
1672 		gettimeofday(&tv_odd, (struct timezone *)NULL);
1673 		timersub(&tv_odd, &tv_even, &tv_delta);
1674 		for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
1675 		compute_average(EVEN_COUNTERS);
1676 		format_all_counters(EVEN_COUNTERS);
1677 		flush_stdout();
1678 		sleep(interval_sec);
1679 		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1680 		if (retval < -1) {
1681 			exit(retval);
1682 		} else if (retval == -1) {
1683 			re_initialize();
1684 			goto restart;
1685 		}
1686 		gettimeofday(&tv_even, (struct timezone *)NULL);
1687 		timersub(&tv_even, &tv_odd, &tv_delta);
1688 		for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
1689 		compute_average(ODD_COUNTERS);
1690 		format_all_counters(ODD_COUNTERS);
1691 		flush_stdout();
1692 	}
1693 }
1694 
1695 void check_dev_msr()
1696 {
1697 	struct stat sb;
1698 	char pathname[32];
1699 
1700 	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1701 	if (stat(pathname, &sb))
1702 		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1703 			err(-5, "no %s, Try \"# modprobe msr\" ", pathname);
1704 }
1705 
1706 void check_permissions()
1707 {
1708 	struct __user_cap_header_struct cap_header_data;
1709 	cap_user_header_t cap_header = &cap_header_data;
1710 	struct __user_cap_data_struct cap_data_data;
1711 	cap_user_data_t cap_data = &cap_data_data;
1712 	extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1713 	int do_exit = 0;
1714 	char pathname[32];
1715 
1716 	/* check for CAP_SYS_RAWIO */
1717 	cap_header->pid = getpid();
1718 	cap_header->version = _LINUX_CAPABILITY_VERSION;
1719 	if (capget(cap_header, cap_data) < 0)
1720 		err(-6, "capget(2) failed");
1721 
1722 	if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1723 		do_exit++;
1724 		warnx("capget(CAP_SYS_RAWIO) failed,"
1725 			" try \"# setcap cap_sys_rawio=ep %s\"", progname);
1726 	}
1727 
1728 	/* test file permissions */
1729 	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1730 	if (euidaccess(pathname, R_OK)) {
1731 		do_exit++;
1732 		warn("%s open failed, try chown or chmod +r /dev/cpu/*/msr", pathname);
1733 	}
1734 
1735 	/* if all else fails, tell them to be root */
1736 	if (do_exit)
1737 		if (getuid() != 0)
1738 			warnx("... or simply run as root");
1739 
1740 	if (do_exit)
1741 		exit(-6);
1742 }
1743 
1744 /*
1745  * NHM adds support for additional MSRs:
1746  *
1747  * MSR_SMI_COUNT                   0x00000034
1748  *
1749  * MSR_NHM_PLATFORM_INFO           0x000000ce
1750  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
1751  *
1752  * MSR_PKG_C3_RESIDENCY            0x000003f8
1753  * MSR_PKG_C6_RESIDENCY            0x000003f9
1754  * MSR_CORE_C3_RESIDENCY           0x000003fc
1755  * MSR_CORE_C6_RESIDENCY           0x000003fd
1756  *
1757  * Side effect:
1758  * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
1759  */
1760 int probe_nhm_msrs(unsigned int family, unsigned int model)
1761 {
1762 	unsigned long long msr;
1763 	int *pkg_cstate_limits;
1764 
1765 	if (!genuine_intel)
1766 		return 0;
1767 
1768 	if (family != 6)
1769 		return 0;
1770 
1771 	switch (model) {
1772 	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
1773 	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1774 	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
1775 	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
1776 	case 0x2C:	/* Westmere EP - Gulftown */
1777 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
1778 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
1779 		pkg_cstate_limits = nhm_pkg_cstate_limits;
1780 		break;
1781 	case 0x2A:	/* SNB */
1782 	case 0x2D:	/* SNB Xeon */
1783 	case 0x3A:	/* IVB */
1784 	case 0x3E:	/* IVB Xeon */
1785 		pkg_cstate_limits = snb_pkg_cstate_limits;
1786 		break;
1787 	case 0x3C:	/* HSW */
1788 	case 0x3F:	/* HSX */
1789 	case 0x45:	/* HSW */
1790 	case 0x46:	/* HSW */
1791 	case 0x3D:	/* BDW */
1792 	case 0x47:	/* BDW */
1793 	case 0x4F:	/* BDX */
1794 	case 0x56:	/* BDX-DE */
1795 	case 0x4E:	/* SKL */
1796 	case 0x5E:	/* SKL */
1797 		pkg_cstate_limits = hsw_pkg_cstate_limits;
1798 		break;
1799 	case 0x37:	/* BYT */
1800 	case 0x4D:	/* AVN */
1801 		pkg_cstate_limits = slv_pkg_cstate_limits;
1802 		break;
1803 	case 0x4C:	/* AMT */
1804 		pkg_cstate_limits = amt_pkg_cstate_limits;
1805 		break;
1806 	case 0x57:	/* PHI */
1807 		pkg_cstate_limits = phi_pkg_cstate_limits;
1808 		break;
1809 	default:
1810 		return 0;
1811 	}
1812 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1813 
1814 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
1815 
1816 	return 1;
1817 }
1818 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
1819 {
1820 	switch (model) {
1821 	/* Nehalem compatible, but do not include turbo-ratio limit support */
1822 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
1823 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
1824 		return 0;
1825 	default:
1826 		return 1;
1827 	}
1828 }
1829 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
1830 {
1831 	if (!genuine_intel)
1832 		return 0;
1833 
1834 	if (family != 6)
1835 		return 0;
1836 
1837 	switch (model) {
1838 	case 0x3E:	/* IVB Xeon */
1839 	case 0x3F:	/* HSW Xeon */
1840 		return 1;
1841 	default:
1842 		return 0;
1843 	}
1844 }
1845 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
1846 {
1847 	if (!genuine_intel)
1848 		return 0;
1849 
1850 	if (family != 6)
1851 		return 0;
1852 
1853 	switch (model) {
1854 	case 0x3F:	/* HSW Xeon */
1855 		return 1;
1856 	default:
1857 		return 0;
1858 	}
1859 }
1860 
1861 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
1862 {
1863 	if (!genuine_intel)
1864 		return 0;
1865 
1866 	if (family != 6)
1867 		return 0;
1868 
1869 	switch (model) {
1870 	case 0x57:	/* Knights Landing */
1871 		return 1;
1872 	default:
1873 		return 0;
1874 	}
1875 }
static void
dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
{
1879 	if (!do_nhm_platform_info)
1880 		return;
1881 
1882 	dump_nhm_platform_info();
1883 
1884 	if (has_hsw_turbo_ratio_limit(family, model))
1885 		dump_hsw_turbo_ratio_limits();
1886 
1887 	if (has_ivt_turbo_ratio_limit(family, model))
1888 		dump_ivt_turbo_ratio_limits();
1889 
1890 	if (has_nhm_turbo_ratio_limit(family, model))
1891 		dump_nhm_turbo_ratio_limits();
1892 
1893 	if (has_knl_turbo_ratio_limit(family, model))
1894 		dump_knl_turbo_ratio_limits();
1895 
1896 	dump_nhm_cst_cfg();
1897 }
1898 
1899 
1900 /*
1901  * print_epb()
1902  * Decode the ENERGY_PERF_BIAS MSR
1903  */
1904 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1905 {
1906 	unsigned long long msr;
1907 	char *epb_string;
1908 	int cpu;
1909 
1910 	if (!has_epb)
1911 		return 0;
1912 
1913 	cpu = t->cpu_id;
1914 
1915 	/* EPB is per-package */
1916 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1917 		return 0;
1918 
1919 	if (cpu_migrate(cpu)) {
1920 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
1921 		return -1;
1922 	}
1923 
1924 	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
1925 		return 0;
1926 
1927 	switch (msr & 0xF) {
1928 	case ENERGY_PERF_BIAS_PERFORMANCE:
1929 		epb_string = "performance";
1930 		break;
1931 	case ENERGY_PERF_BIAS_NORMAL:
1932 		epb_string = "balanced";
1933 		break;
1934 	case ENERGY_PERF_BIAS_POWERSAVE:
1935 		epb_string = "powersave";
1936 		break;
1937 	default:
1938 		epb_string = "custom";
1939 		break;
1940 	}
1941 	fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
1942 
1943 	return 0;
1944 }
1945 
1946 /*
1947  * print_perf_limit()
1948  */
1949 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1950 {
1951 	unsigned long long msr;
1952 	int cpu;
1953 
1954 	cpu = t->cpu_id;
1955 
1956 	/* per-package */
1957 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1958 		return 0;
1959 
1960 	if (cpu_migrate(cpu)) {
1961 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
1962 		return -1;
1963 	}
1964 
1965 	if (do_core_perf_limit_reasons) {
1966 		get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
1967 		fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
1968 		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
1969 			(msr & 1 << 15) ? "bit15, " : "",
1970 			(msr & 1 << 14) ? "bit14, " : "",
1971 			(msr & 1 << 13) ? "Transitions, " : "",
1972 			(msr & 1 << 12) ? "MultiCoreTurbo, " : "",
1973 			(msr & 1 << 11) ? "PkgPwrL2, " : "",
1974 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
1975 			(msr & 1 << 9) ? "CorePwr, " : "",
1976 			(msr & 1 << 8) ? "Amps, " : "",
1977 			(msr & 1 << 6) ? "VR-Therm, " : "",
1978 			(msr & 1 << 5) ? "Auto-HWP, " : "",
1979 			(msr & 1 << 4) ? "Graphics, " : "",
1980 			(msr & 1 << 2) ? "bit2, " : "",
1981 			(msr & 1 << 1) ? "ThermStatus, " : "",
1982 			(msr & 1 << 0) ? "PROCHOT, " : "");
1983 		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
1984 			(msr & 1 << 31) ? "bit31, " : "",
1985 			(msr & 1 << 30) ? "bit30, " : "",
1986 			(msr & 1 << 29) ? "Transitions, " : "",
1987 			(msr & 1 << 28) ? "MultiCoreTurbo, " : "",
1988 			(msr & 1 << 27) ? "PkgPwrL2, " : "",
1989 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
1990 			(msr & 1 << 25) ? "CorePwr, " : "",
1991 			(msr & 1 << 24) ? "Amps, " : "",
1992 			(msr & 1 << 22) ? "VR-Therm, " : "",
1993 			(msr & 1 << 21) ? "Auto-HWP, " : "",
1994 			(msr & 1 << 20) ? "Graphics, " : "",
1995 			(msr & 1 << 18) ? "bit18, " : "",
1996 			(msr & 1 << 17) ? "ThermStatus, " : "",
1997 			(msr & 1 << 16) ? "PROCHOT, " : "");
1998 
1999 	}
2000 	if (do_gfx_perf_limit_reasons) {
2001 		get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
2002 		fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2003 		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
2004 			(msr & 1 << 0) ? "PROCHOT, " : "",
2005 			(msr & 1 << 1) ? "ThermStatus, " : "",
2006 			(msr & 1 << 4) ? "Graphics, " : "",
2007 			(msr & 1 << 6) ? "VR-Therm, " : "",
2008 			(msr & 1 << 8) ? "Amps, " : "",
2009 			(msr & 1 << 9) ? "GFXPwr, " : "",
2010 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
2011 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
2012 		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
2013 			(msr & 1 << 16) ? "PROCHOT, " : "",
2014 			(msr & 1 << 17) ? "ThermStatus, " : "",
2015 			(msr & 1 << 20) ? "Graphics, " : "",
2016 			(msr & 1 << 22) ? "VR-Therm, " : "",
2017 			(msr & 1 << 24) ? "Amps, " : "",
2018 			(msr & 1 << 25) ? "GFXPwr, " : "",
2019 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
2020 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
2021 	}
2022 	if (do_ring_perf_limit_reasons) {
2023 		get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
2024 		fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2025 		fprintf(stderr, " (Active: %s%s%s%s%s%s)",
2026 			(msr & 1 << 0) ? "PROCHOT, " : "",
2027 			(msr & 1 << 1) ? "ThermStatus, " : "",
2028 			(msr & 1 << 6) ? "VR-Therm, " : "",
2029 			(msr & 1 << 8) ? "Amps, " : "",
2030 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
2031 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
2032 		fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
2033 			(msr & 1 << 16) ? "PROCHOT, " : "",
2034 			(msr & 1 << 17) ? "ThermStatus, " : "",
2035 			(msr & 1 << 22) ? "VR-Therm, " : "",
2036 			(msr & 1 << 24) ? "Amps, " : "",
2037 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
2038 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
2039 	}
2040 	return 0;
2041 }
2042 
2043 #define	RAPL_POWER_GRANULARITY	0x7FFF	/* 15 bit power granularity */
2044 #define	RAPL_TIME_GRANULARITY	0x3F /* 6 bit time granularity */
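
/*
 * For reference, the field layout assumed by get_tdp() and print_rapl()
 * below: MSR_PKG_POWER_INFO (and MSR_DRAM_POWER_INFO) pack four values,
 * each scaled by the units read from MSR_RAPL_POWER_UNIT:
 *
 *	bits 14:0	thermal spec power (TDP)	* rapl_power_units
 *	bits 30:16	minimum power			* rapl_power_units
 *	bits 46:32	maximum power			* rapl_power_units
 *	bits 53:48	maximum time window		* rapl_time_units
 */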
2045 
double get_tdp(unsigned int model)
2047 {
2048 	unsigned long long msr;
2049 
2050 	if (do_rapl & RAPL_PKG_POWER_INFO)
2051 		if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
2052 			return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
2053 
2054 	switch (model) {
2055 	case 0x37:
2056 	case 0x4D:
2057 		return 30.0;
2058 	default:
2059 		return 135.0;
2060 	}
2061 }
2062 
2063 /*
2064  * rapl_dram_energy_units_probe()
2065  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
2066  */
2067 static double
2068 rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
2069 {
2070 	/* only called for genuine_intel, family 6 */
2071 
2072 	switch (model) {
2073 	case 0x3F:	/* HSX */
2074 	case 0x4F:	/* BDX */
2075 	case 0x56:	/* BDX-DE */
2076 	case 0x57:	/* KNL */
2077 		return (rapl_dram_energy_units = 15.3 / 1000000);
2078 	default:
2079 		return (rapl_energy_units);
2080 	}
2081 }
2082 
2083 
2084 /*
2085  * rapl_probe()
2086  *
2087  * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
2088  */
2089 void rapl_probe(unsigned int family, unsigned int model)
2090 {
2091 	unsigned long long msr;
2092 	unsigned int time_unit;
2093 	double tdp;
2094 
2095 	if (!genuine_intel)
2096 		return;
2097 
2098 	if (family != 6)
2099 		return;
2100 
2101 	switch (model) {
2102 	case 0x2A:
2103 	case 0x3A:
2104 	case 0x3C:	/* HSW */
2105 	case 0x45:	/* HSW */
2106 	case 0x46:	/* HSW */
2107 	case 0x3D:	/* BDW */
2108 	case 0x47:	/* BDW */
2109 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
2110 		break;
2111 	case 0x4E:	/* SKL */
2112 	case 0x5E:	/* SKL */
2113 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2114 		break;
2115 	case 0x3F:	/* HSX */
2116 	case 0x4F:	/* BDX */
2117 	case 0x56:	/* BDX-DE */
2118 	case 0x57:	/* KNL */
2119 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2120 		break;
2121 	case 0x2D:
2122 	case 0x3E:
2123 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
2124 		break;
2125 	case 0x37:	/* BYT */
2126 	case 0x4D:	/* AVN */
		do_rapl = RAPL_PKG | RAPL_CORES;
2128 		break;
2129 	default:
2130 		return;
2131 	}
2132 
2133 	/* units on package 0, verify later other packages match */
2134 	if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
2135 		return;
2136 
2137 	rapl_power_units = 1.0 / (1 << (msr & 0xF));
2138 	if (model == 0x37)
2139 		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
2140 	else
2141 		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
2142 
2143 	rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
2144 
2145 	time_unit = msr >> 16 & 0xF;
2146 	if (time_unit == 0)
2147 		time_unit = 0xA;
2148 
2149 	rapl_time_units = 1.0 / (1 << (time_unit));
2150 
2151 	tdp = get_tdp(model);
2152 
2153 	rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
2154 	if (debug)
2155 		fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
2156 
2157 	return;
2158 }
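
/*
 * A minimal sketch (not used by the tool) of how the units derived above
 * turn a raw energy-status counter into power: the 32-bit counter wraps,
 * so take an unsigned delta of two raw readings of e.g.
 * MSR_PKG_ENERGY_STATUS, scale by rapl_energy_units to get Joules, and
 * divide by the sampling interval for average Watts:
 *
 *	unsigned int delta = raw_after - raw_before;	(wrap-safe)
 *	double joules = delta * rapl_energy_units;
 *	double watts = joules / interval_seconds;
 *
 * rapl_joule_counter_range above is the same relation run backwards:
 * the number of seconds a full 2^32-count range lasts at the package TDP.
 */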
2159 
void perf_limit_reasons_probe(unsigned int family, unsigned int model)
2161 {
2162 	if (!genuine_intel)
2163 		return;
2164 
2165 	if (family != 6)
2166 		return;
2167 
2168 	switch (model) {
2169 	case 0x3C:	/* HSW */
2170 	case 0x45:	/* HSW */
2171 	case 0x46:	/* HSW */
2172 		do_gfx_perf_limit_reasons = 1;
2173 	case 0x3F:	/* HSX */
2174 		do_core_perf_limit_reasons = 1;
2175 		do_ring_perf_limit_reasons = 1;
2176 	default:
2177 		return;
2178 	}
2179 }
2180 
2181 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2182 {
2183 	unsigned long long msr;
	unsigned int dts, dts2;
2185 	int cpu;
2186 
2187 	if (!(do_dts || do_ptm))
2188 		return 0;
2189 
2190 	cpu = t->cpu_id;
2191 
2192 	/* DTS is per-core, no need to print for each thread */
2193 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
2194 		return 0;
2195 
2196 	if (cpu_migrate(cpu)) {
2197 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2198 		return -1;
2199 	}
2200 
2201 	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
2202 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
2203 			return 0;
2204 
2205 		dts = (msr >> 16) & 0x7F;
2206 		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
2207 			cpu, msr, tcc_activation_temp - dts);
2208 
2209 #ifdef	THERM_DEBUG
2210 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
2211 			return 0;
2212 
2213 		dts = (msr >> 16) & 0x7F;
2214 		dts2 = (msr >> 8) & 0x7F;
2215 		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2216 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2217 #endif
2218 	}
2219 
2220 
2221 	if (do_dts) {
2222 		unsigned int resolution;
2223 
2224 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
2225 			return 0;
2226 
2227 		dts = (msr >> 16) & 0x7F;
2228 		resolution = (msr >> 27) & 0xF;
2229 		fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
2230 			cpu, msr, tcc_activation_temp - dts, resolution);
2231 
2232 #ifdef THERM_DEBUG
2233 		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
2234 			return 0;
2235 
2236 		dts = (msr >> 16) & 0x7F;
2237 		dts2 = (msr >> 8) & 0x7F;
2238 		fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2239 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2240 #endif
2241 	}
2242 
2243 	return 0;
2244 }
2245 
2246 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
2247 {
2248 	fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
2249 		cpu, label,
2250 		((msr >> 15) & 1) ? "EN" : "DIS",
2251 		((msr >> 0) & 0x7FFF) * rapl_power_units,
2252 		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
2253 		(((msr >> 16) & 1) ? "EN" : "DIS"));
2254 
2255 	return;
2256 }
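
/*
 * Worked example (hypothetical register values) for the time-window decode
 * above, window = (1.0 + Y/4) * 2^Z * rapl_time_units, with Y in bits 23:22
 * and Z in bits 21:17: Y = 3 and Z = 4 with the default rapl_time_units of
 * 1/1024 sec give (1 + 3/4) * 16 / 1024 ~= 0.027 sec.
 */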
2257 
2258 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2259 {
2260 	unsigned long long msr;
2261 	int cpu;
2262 
2263 	if (!do_rapl)
2264 		return 0;
2265 
2266 	/* RAPL counters are per package, so print only for 1st thread/package */
2267 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2268 		return 0;
2269 
2270 	cpu = t->cpu_id;
2271 	if (cpu_migrate(cpu)) {
2272 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2273 		return -1;
2274 	}
2275 
2276 	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
2277 		return -1;
2278 
2279 	if (debug) {
2280 		fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
2281 			"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
2282 			rapl_power_units, rapl_energy_units, rapl_time_units);
2283 	}
2284 	if (do_rapl & RAPL_PKG_POWER_INFO) {
2285 
2286 		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
			return -5;
2288 
2289 
2290 		fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2291 			cpu, msr,
2292 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2293 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2294 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2295 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2296 
2297 	}
2298 	if (do_rapl & RAPL_PKG) {
2299 
2300 		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
2301 			return -9;
2302 
2303 		fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
2304 			cpu, msr, (msr >> 63) & 1 ? "": "UN");
2305 
2306 		print_power_limit_msr(cpu, msr, "PKG Limit #1");
2307 		fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
2308 			cpu,
2309 			((msr >> 47) & 1) ? "EN" : "DIS",
2310 			((msr >> 32) & 0x7FFF) * rapl_power_units,
2311 			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
2312 			((msr >> 48) & 1) ? "EN" : "DIS");
2313 	}
2314 
2315 	if (do_rapl & RAPL_DRAM_POWER_INFO) {
2316 		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
			return -6;

		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2320 			cpu, msr,
2321 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2322 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2323 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2324 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2325 	}
2326 	if (do_rapl & RAPL_DRAM) {
2327 		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
2328 			return -9;
2329 		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
2330 				cpu, msr, (msr >> 31) & 1 ? "": "UN");
2331 
2332 		print_power_limit_msr(cpu, msr, "DRAM Limit");
2333 	}
2334 	if (do_rapl & RAPL_CORE_POLICY) {
2335 		if (debug) {
2336 			if (get_msr(cpu, MSR_PP0_POLICY, &msr))
2337 				return -7;
2338 
2339 			fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
2340 		}
2341 	}
2342 	if (do_rapl & RAPL_CORES) {
2343 		if (debug) {
2344 
2345 			if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
2346 				return -9;
2347 			fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
2348 					cpu, msr, (msr >> 31) & 1 ? "": "UN");
2349 			print_power_limit_msr(cpu, msr, "Cores Limit");
2350 		}
2351 	}
2352 	if (do_rapl & RAPL_GFX) {
2353 		if (debug) {
2354 			if (get_msr(cpu, MSR_PP1_POLICY, &msr))
2355 				return -8;
2356 
2357 			fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
2358 
2359 			if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
2360 				return -9;
2361 			fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
2362 					cpu, msr, (msr >> 31) & 1 ? "": "UN");
2363 			print_power_limit_msr(cpu, msr, "GFX Limit");
2364 		}
2365 	}
2366 	return 0;
2367 }
2368 
2369 /*
2370  * SNB adds support for additional MSRs:
2371  *
2372  * MSR_PKG_C7_RESIDENCY            0x000003fa
2373  * MSR_CORE_C7_RESIDENCY           0x000003fe
2374  * MSR_PKG_C2_RESIDENCY            0x0000060d
2375  */
2376 
2377 int has_snb_msrs(unsigned int family, unsigned int model)
2378 {
2379 	if (!genuine_intel)
2380 		return 0;
2381 
2382 	switch (model) {
2383 	case 0x2A:
2384 	case 0x2D:
2385 	case 0x3A:	/* IVB */
2386 	case 0x3E:	/* IVB Xeon */
2387 	case 0x3C:	/* HSW */
	case 0x3F:	/* HSX */
2389 	case 0x45:	/* HSW */
2390 	case 0x46:	/* HSW */
2391 	case 0x3D:	/* BDW */
2392 	case 0x47:	/* BDW */
2393 	case 0x4F:	/* BDX */
2394 	case 0x56:	/* BDX-DE */
2395 	case 0x4E:	/* SKL */
2396 	case 0x5E:	/* SKL */
2397 		return 1;
2398 	}
2399 	return 0;
2400 }
2401 
2402 /*
2403  * HSW adds support for additional MSRs:
2404  *
2405  * MSR_PKG_C8_RESIDENCY            0x00000630
2406  * MSR_PKG_C9_RESIDENCY            0x00000631
2407  * MSR_PKG_C10_RESIDENCY           0x00000632
2408  */
2409 int has_hsw_msrs(unsigned int family, unsigned int model)
2410 {
2411 	if (!genuine_intel)
2412 		return 0;
2413 
2414 	switch (model) {
2415 	case 0x45:	/* HSW */
2416 	case 0x3D:	/* BDW */
2417 	case 0x4E:	/* SKL */
2418 	case 0x5E:	/* SKL */
2419 		return 1;
2420 	}
2421 	return 0;
2422 }
2423 
2424 /*
2425  * SKL adds support for additional MSRS:
2426  *
2427  * MSR_PKG_WEIGHTED_CORE_C0_RES    0x00000658
2428  * MSR_PKG_ANY_CORE_C0_RES         0x00000659
2429  * MSR_PKG_ANY_GFXE_C0_RES         0x0000065A
2430  * MSR_PKG_BOTH_CORE_GFXE_C0_RES   0x0000065B
2431  */
2432 int has_skl_msrs(unsigned int family, unsigned int model)
2433 {
2434 	if (!genuine_intel)
2435 		return 0;
2436 
2437 	switch (model) {
2438 	case 0x4E:	/* SKL */
2439 	case 0x5E:	/* SKL */
2440 		return 1;
2441 	}
2442 	return 0;
2443 }
2444 
2445 
2446 
2447 int is_slm(unsigned int family, unsigned int model)
2448 {
2449 	if (!genuine_intel)
2450 		return 0;
2451 	switch (model) {
2452 	case 0x37:	/* BYT */
2453 	case 0x4D:	/* AVN */
2454 		return 1;
2455 	}
2456 	return 0;
2457 }
2458 
2459 int is_knl(unsigned int family, unsigned int model)
2460 {
2461 	if (!genuine_intel)
2462 		return 0;
2463 	switch (model) {
2464 	case 0x57:	/* KNL */
2465 		return 1;
2466 	}
2467 	return 0;
2468 }
2469 
2470 #define SLM_BCLK_FREQS 5
2471 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
2472 
2473 double slm_bclk(void)
2474 {
2475 	unsigned long long msr = 3;
2476 	unsigned int i;
2477 	double freq;
2478 
2479 	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
2480 		fprintf(stderr, "SLM BCLK: unknown\n");
2481 
2482 	i = msr & 0xf;
2483 	if (i >= SLM_BCLK_FREQS) {
2484 		fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
		i = 3;
2486 	}
2487 	freq = slm_freq_table[i];
2488 
	fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);
2490 
2491 	return freq;
2492 }
2493 
2494 double discover_bclk(unsigned int family, unsigned int model)
2495 {
2496 	if (has_snb_msrs(family, model))
2497 		return 100.00;
2498 	else if (is_slm(family, model))
2499 		return slm_bclk();
2500 	else
2501 		return 133.33;
2502 }
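
/*
 * Sketch of how the base clock returned above is used elsewhere in this
 * file: frequency MSRs report 8-bit multiples of bclk, so a hypothetical
 * ratio of 34 decodes as 34 * 100.00 = 3400 MHz on an SNB-or-later part,
 * and as roughly 34 * 133.33 = 4533 MHz on an older 133 MHz-bclk part.
 */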
2503 
2504 /*
2505  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
2506  * the Thermal Control Circuit (TCC) activates.
2507  * This is usually equal to tjMax.
2508  *
2509  * Older processors do not have this MSR, so there we guess,
2510  * but also allow cmdline over-ride with -T.
2511  *
2512  * Several MSR temperature values are in units of degrees-C
2513  * below this value, including the Digital Thermal Sensor (DTS),
2514  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
2515  */
2516 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2517 {
2518 	unsigned long long msr;
2519 	unsigned int target_c_local;
2520 	int cpu;
2521 
2522 	/* tcc_activation_temp is used only for dts or ptm */
2523 	if (!(do_dts || do_ptm))
2524 		return 0;
2525 
2526 	/* this is a per-package concept */
2527 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2528 		return 0;
2529 
2530 	cpu = t->cpu_id;
2531 	if (cpu_migrate(cpu)) {
2532 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2533 		return -1;
2534 	}
2535 
2536 	if (tcc_activation_temp_override != 0) {
2537 		tcc_activation_temp = tcc_activation_temp_override;
2538 		fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
2539 			cpu, tcc_activation_temp);
2540 		return 0;
2541 	}
2542 
2543 	/* Temperature Target MSR is Nehalem and newer only */
2544 	if (!do_nhm_platform_info)
2545 		goto guess;
2546 
2547 	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
2548 		goto guess;
2549 
2550 	target_c_local = (msr >> 16) & 0xFF;
2551 
2552 	if (debug)
2553 		fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
2554 			cpu, msr, target_c_local);
2555 
2556 	if (!target_c_local)
2557 		goto guess;
2558 
2559 	tcc_activation_temp = target_c_local;
2560 
2561 	return 0;
2562 
2563 guess:
2564 	tcc_activation_temp = TJMAX_DEFAULT;
2565 	fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
2566 		cpu, tcc_activation_temp);
2567 
2568 	return 0;
2569 }
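
/*
 * Worked example (hypothetical readings): with tcc_activation_temp = 100 C
 * and a DTS field of 38 in bits 22:16 of MSR_IA32_THERM_STATUS,
 * print_thermal() above reports 100 - 38 = 62 C.
 */
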
2570 void process_cpuid()
2571 {
2572 	unsigned int eax, ebx, ecx, edx, max_level;
2573 	unsigned int fms, family, model, stepping;
2574 
2575 	eax = ebx = ecx = edx = 0;
2576 
2577 	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
2578 
2579 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
2580 		genuine_intel = 1;
2581 
2582 	if (debug)
2583 		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
2584 			(char *)&ebx, (char *)&edx, (char *)&ecx);
2585 
2586 	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
2587 	family = (fms >> 8) & 0xf;
2588 	model = (fms >> 4) & 0xf;
2589 	stepping = fms & 0xf;
2590 	if (family == 6 || family == 0xf)
2591 		model += ((fms >> 16) & 0xf) << 4;
2592 
2593 	if (debug)
2594 		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
2595 			max_level, family, model, stepping, family, model, stepping);
2596 
2597 	if (!(edx & (1 << 5)))
2598 		errx(1, "CPUID: no MSR");
2599 
2600 	/*
2601 	 * check max extended function levels of CPUID.
2602 	 * This is needed to check for invariant TSC.
2603 	 * This check is valid for both Intel and AMD.
2604 	 */
2605 	ebx = ecx = edx = 0;
2606 	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
2607 
2608 	if (max_level >= 0x80000007) {
2609 
2610 		/*
2611 		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
2612 		 * this check is valid for both Intel and AMD
2613 		 */
2614 		__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
2615 		has_invariant_tsc = edx & (1 << 8);
2616 	}
2617 
2618 	/*
2619 	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
2620 	 * this check is valid for both Intel and AMD
2621 	 */
2622 
2623 	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
2624 	has_aperf = ecx & (1 << 0);
2625 	do_dts = eax & (1 << 0);
2626 	do_ptm = eax & (1 << 6);
2627 	has_epb = ecx & (1 << 3);
2628 
2629 	if (debug)
2630 		fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
2631 			has_aperf ? "" : "No ",
2632 			do_dts ? "" : "No ",
2633 			do_ptm ? "" : "No ",
2634 			has_epb ? "" : "No ");
2635 
	if (max_level >= 0x15) {
2637 		unsigned int eax_crystal;
2638 		unsigned int ebx_tsc;
2639 
2640 		/*
2641 		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
2642 		 */
2643 		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
2644 		__get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);
2645 
2646 		if (ebx_tsc != 0) {
2647 
			if (debug)
2649 				fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
2650 					eax_crystal, ebx_tsc, crystal_hz);
2651 
2652 			if (crystal_hz == 0)
2653 				switch(model) {
2654 				case 0x4E:	/* SKL */
2655 				case 0x5E:	/* SKL */
2656 					crystal_hz = 24000000;	/* 24 MHz */
2657 					break;
2658 				default:
2659 					crystal_hz = 0;
2660 			}
2661 
2662 			if (crystal_hz) {
2663 				tsc_hz =  (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
2664 				if (debug)
2665 					fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
2666 						tsc_hz / 1000000, crystal_hz, ebx_tsc,  eax_crystal);
2667 			}
2668 		}
2669 	}
2670 
2671 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
2672 	do_snb_cstates = has_snb_msrs(family, model);
2673 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
2674 	do_pc3 = (pkg_cstate_limit >= PCL__3);
2675 	do_pc6 = (pkg_cstate_limit >= PCL__6);
2676 	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
2677 	do_c8_c9_c10 = has_hsw_msrs(family, model);
2678 	do_skl_residency = has_skl_msrs(family, model);
2679 	do_slm_cstates = is_slm(family, model);
2680 	do_knl_cstates  = is_knl(family, model);
2681 	bclk = discover_bclk(family, model);
2682 
2683 	rapl_probe(family, model);
2684 	perf_limit_reasons_probe(family, model);
2685 
2686 	if (debug)
		dump_cstate_pstate_config_info(family, model);
2688 
2689 	return;
2690 }
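
/*
 * Worked example (hypothetical CPUID.15H values) for the TSC calibration
 * above: a 24 MHz crystal with EBX = 200 and EAX = 2 gives
 * tsc_hz = 24000000 * 200 / 2 = 2400000000, i.e. a 2400 MHz TSC.
 */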
2691 
2692 void help()
2693 {
2694 	fprintf(stderr,
2695 	"Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
2696 	"\n"
2697 	"Turbostat forks the specified COMMAND and prints statistics\n"
2698 	"when COMMAND completes.\n"
	"If no COMMAND is specified, turbostat wakes every 5 seconds\n"
	"to print statistics, until interrupted.\n"
	"--debug	run in \"debug\" mode\n"
	"--Dump		dump raw counter values once, then exit\n"
	"--interval sec	Override default 5-second measurement interval\n"
	"--help		print this help message\n"
	"--counter msr	print 32-bit counter at address \"msr\"\n"
	"--Counter msr	print 64-bit Counter at address \"msr\"\n"
	"--Joules	report RAPL energy in Joules, rather than Watts\n"
	"--msr msr	print 32-bit value at address \"msr\"\n"
	"--MSR msr	print 64-bit Value at address \"msr\"\n"
	"--Package	print 1 line per package, rather than 1 per CPU\n"
	"--processor	print 1 line per core, rather than 1 per CPU\n"
	"--Summary	limit output to a 1-line summary for each interval\n"
	"--TCC temp	set TCC activation temperature (degrees C)\n"
	"--version	print version information\n"
2709 	"\n"
2710 	"For more help, run \"man turbostat\"\n");
2711 }
2712 
2713 
2714 /*
2715  * in /dev/cpu/ return success for names that are numbers
2716  * ie. filter out ".", "..", "microcode".
2717  */
2718 int dir_filter(const struct dirent *dirp)
2719 {
2720 	if (isdigit(dirp->d_name[0]))
2721 		return 1;
2722 	else
2723 		return 0;
2724 }
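
/*
 * Usage sketch (illustrative, not this tool's actual call site):
 * dir_filter() matches the filter signature scandir(3) expects, so the
 * numeric per-CPU entries under /dev/cpu can be enumerated like this:
 *
 *	struct dirent **namelist;
 *	int n = scandir("/dev/cpu", &namelist, dir_filter, NULL);
 *
 *	while (n-- > 0) {
 *		puts(namelist[n]->d_name);
 *		free(namelist[n]);
 *	}
 *	free(namelist);
 */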
2725 
2726 int open_dev_cpu_msr(int dummy1)
2727 {
2728 	return 0;
2729 }
2730 
2731 void topology_probe()
2732 {
2733 	int i;
2734 	int max_core_id = 0;
2735 	int max_package_id = 0;
2736 	int max_siblings = 0;
2737 	struct cpu_topology {
2738 		int core_id;
2739 		int physical_package_id;
2740 	} *cpus;
2741 
2742 	/* Initialize num_cpus, max_cpu_num */
2743 	topo.num_cpus = 0;
2744 	topo.max_cpu_num = 0;
2745 	for_all_proc_cpus(count_cpus);
2746 	if (!summary_only && topo.num_cpus > 1)
2747 		show_cpu = 1;
2748 
2749 	if (debug > 1)
2750 		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
2751 
2752 	cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
2753 	if (cpus == NULL)
2754 		err(1, "calloc cpus");
2755 
2756 	/*
2757 	 * Allocate and initialize cpu_present_set
2758 	 */
2759 	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
2760 	if (cpu_present_set == NULL)
2761 		err(3, "CPU_ALLOC");
2762 	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2763 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
2764 	for_all_proc_cpus(mark_cpu_present);
2765 
2766 	/*
2767 	 * Allocate and initialize cpu_affinity_set
2768 	 */
2769 	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
2770 	if (cpu_affinity_set == NULL)
2771 		err(3, "CPU_ALLOC");
2772 	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2773 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
2774 
2775 
2776 	/*
2777 	 * For online cpus
2778 	 * find max_core_id, max_package_id
2779 	 */
2780 	for (i = 0; i <= topo.max_cpu_num; ++i) {
2781 		int siblings;
2782 
2783 		if (cpu_is_not_present(i)) {
2784 			if (debug > 1)
2785 				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
2786 			continue;
2787 		}
2788 		cpus[i].core_id = get_core_id(i);
2789 		if (cpus[i].core_id > max_core_id)
2790 			max_core_id = cpus[i].core_id;
2791 
2792 		cpus[i].physical_package_id = get_physical_package_id(i);
2793 		if (cpus[i].physical_package_id > max_package_id)
2794 			max_package_id = cpus[i].physical_package_id;
2795 
2796 		siblings = get_num_ht_siblings(i);
2797 		if (siblings > max_siblings)
2798 			max_siblings = siblings;
2799 		if (debug > 1)
2800 			fprintf(stderr, "cpu %d pkg %d core %d\n",
2801 				i, cpus[i].physical_package_id, cpus[i].core_id);
2802 	}
2803 	topo.num_cores_per_pkg = max_core_id + 1;
2804 	if (debug > 1)
2805 		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
2806 			max_core_id, topo.num_cores_per_pkg);
2807 	if (debug && !summary_only && topo.num_cores_per_pkg > 1)
2808 		show_core = 1;
2809 
2810 	topo.num_packages = max_package_id + 1;
2811 	if (debug > 1)
2812 		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
2813 			max_package_id, topo.num_packages);
2814 	if (debug && !summary_only && topo.num_packages > 1)
2815 		show_pkg = 1;
2816 
2817 	topo.num_threads_per_core = max_siblings;
2818 	if (debug > 1)
2819 		fprintf(stderr, "max_siblings %d\n", max_siblings);
2820 
2821 	free(cpus);
2822 }
2823 
2824 void
2825 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
2826 {
2827 	int i;
2828 
2829 	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
2830 		topo.num_packages, sizeof(struct thread_data));
2831 	if (*t == NULL)
2832 		goto error;
2833 
2834 	for (i = 0; i < topo.num_threads_per_core *
2835 		topo.num_cores_per_pkg * topo.num_packages; i++)
2836 		(*t)[i].cpu_id = -1;
2837 
2838 	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
2839 		sizeof(struct core_data));
2840 	if (*c == NULL)
2841 		goto error;
2842 
2843 	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
2844 		(*c)[i].core_id = -1;
2845 
2846 	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
2847 	if (*p == NULL)
2848 		goto error;
2849 
2850 	for (i = 0; i < topo.num_packages; i++)
2851 		(*p)[i].package_id = i;
2852 
2853 	return;
2854 error:
2855 	err(1, "calloc counters");
2856 }
2857 /*
2858  * init_counter()
2859  *
2860  * set cpu_id, core_num, pkg_num
2861  * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
2862  *
2863  * increment topo.num_cores when 1st core in pkg seen
2864  */
2865 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
2866 	struct pkg_data *pkg_base, int thread_num, int core_num,
2867 	int pkg_num, int cpu_id)
2868 {
2869 	struct thread_data *t;
2870 	struct core_data *c;
2871 	struct pkg_data *p;
2872 
2873 	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
2874 	c = GET_CORE(core_base, core_num, pkg_num);
2875 	p = GET_PKG(pkg_base, pkg_num);
2876 
2877 	t->cpu_id = cpu_id;
2878 	if (thread_num == 0) {
2879 		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
2880 		if (cpu_is_first_core_in_package(cpu_id))
2881 			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
2882 	}
2883 
2884 	c->core_id = core_num;
2885 	p->package_id = pkg_num;
2886 }
2887 
2888 
2889 int initialize_counters(int cpu_id)
2890 {
2891 	int my_thread_id, my_core_id, my_package_id;
2892 
2893 	my_package_id = get_physical_package_id(cpu_id);
2894 	my_core_id = get_core_id(cpu_id);
2895 	my_thread_id = get_cpu_position_in_core(cpu_id);
2896 	if (!my_thread_id)
2897 		topo.num_cores++;
2898 
2899 	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2900 	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2901 	return 0;
2902 }
2903 
2904 void allocate_output_buffer()
2905 {
	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
	if (output_buffer == NULL)
		err(-1, "calloc output buffer");
	outp = output_buffer;
2910 }
2911 
2912 void setup_all_buffers(void)
2913 {
2914 	topology_probe();
2915 	allocate_counters(&thread_even, &core_even, &package_even);
2916 	allocate_counters(&thread_odd, &core_odd, &package_odd);
2917 	allocate_output_buffer();
2918 	for_all_proc_cpus(initialize_counters);
2919 }
2920 
2921 void set_base_cpu(void)
2922 {
2923 	base_cpu = sched_getcpu();
2924 	if (base_cpu < 0)
2925 		err(-ENODEV, "No valid cpus found");
2926 
2927 	if (debug > 1)
2928 		fprintf(stderr, "base_cpu = %d\n", base_cpu);
2929 }
2930 
2931 void turbostat_init()
2932 {
2933 	setup_all_buffers();
2934 	set_base_cpu();
2935 	check_dev_msr();
2936 	check_permissions();
2937 	process_cpuid();
2938 
2939 
2940 	if (debug)
2941 		for_all_cpus(print_epb, ODD_COUNTERS);
2942 
2943 	if (debug)
2944 		for_all_cpus(print_perf_limit, ODD_COUNTERS);
2945 
2946 	if (debug)
2947 		for_all_cpus(print_rapl, ODD_COUNTERS);
2948 
2949 	for_all_cpus(set_temperature_target, ODD_COUNTERS);
2950 
2951 	if (debug)
2952 		for_all_cpus(print_thermal, ODD_COUNTERS);
2953 }
2954 
2955 int fork_it(char **argv)
2956 {
2957 	pid_t child_pid;
2958 	int status;
2959 
2960 	status = for_all_cpus(get_counters, EVEN_COUNTERS);
2961 	if (status)
2962 		exit(status);
2963 	/* clear affinity side-effect of get_counters() */
2964 	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
2965 	gettimeofday(&tv_even, (struct timezone *)NULL);
2966 
2967 	child_pid = fork();
	if (!child_pid) {
		/* child */
		execvp(argv[0], argv);
		err(errno, "exec %s", argv[0]);
	} else {
2972 
2973 		/* parent */
2974 		if (child_pid == -1)
2975 			err(1, "fork");
2976 
2977 		signal(SIGINT, SIG_IGN);
2978 		signal(SIGQUIT, SIG_IGN);
2979 		if (waitpid(child_pid, &status, 0) == -1)
2980 			err(status, "waitpid");
2981 	}
2982 	/*
2983 	 * n.b. fork_it() does not check for errors from for_all_cpus()
2984 	 * because re-starting is problematic when forking
2985 	 */
2986 	for_all_cpus(get_counters, ODD_COUNTERS);
2987 	gettimeofday(&tv_odd, (struct timezone *)NULL);
2988 	timersub(&tv_odd, &tv_even, &tv_delta);
2989 	for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
2990 	compute_average(EVEN_COUNTERS);
2991 	format_all_counters(EVEN_COUNTERS);
2992 	flush_stderr();
2993 
2994 	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
2995 
2996 	return status;
2997 }
2998 
2999 int get_and_dump_counters(void)
3000 {
3001 	int status;
3002 
3003 	status = for_all_cpus(get_counters, ODD_COUNTERS);
3004 	if (status)
3005 		return status;
3006 
3007 	status = for_all_cpus(dump_counters, ODD_COUNTERS);
3008 	if (status)
3009 		return status;
3010 
3011 	flush_stdout();
3012 
3013 	return status;
3014 }
3015 
void print_version()
{
3017 	fprintf(stderr, "turbostat version 4.7 27-May, 2015"
3018 		" - Len Brown <lenb@kernel.org>\n");
3019 }
3020 
3021 void cmdline(int argc, char **argv)
3022 {
3023 	int opt;
3024 	int option_index = 0;
3025 	static struct option long_options[] = {
3026 		{"Counter",	required_argument,	0, 'C'},
3027 		{"counter",	required_argument,	0, 'c'},
3028 		{"Dump",	no_argument,		0, 'D'},
3029 		{"debug",	no_argument,		0, 'd'},
3030 		{"interval",	required_argument,	0, 'i'},
3031 		{"help",	no_argument,		0, 'h'},
3032 		{"Joules",	no_argument,		0, 'J'},
3033 		{"MSR",		required_argument,	0, 'M'},
3034 		{"msr",		required_argument,	0, 'm'},
		{"Package",	no_argument,		0, 'P'},
3036 		{"processor",	no_argument,		0, 'p'},
3037 		{"Summary",	no_argument,		0, 'S'},
3038 		{"TCC",		required_argument,	0, 'T'},
3039 		{"version",	no_argument,		0, 'v' },
3040 		{0,		0,			0,  0 }
3041 	};
3042 
3043 	progname = argv[0];
3044 
3045 	while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
3046 				long_options, &option_index)) != -1) {
3047 		switch (opt) {
3048 		case 'C':
3049 			sscanf(optarg, "%x", &extra_delta_offset64);
3050 			break;
3051 		case 'c':
3052 			sscanf(optarg, "%x", &extra_delta_offset32);
3053 			break;
3054 		case 'D':
3055 			dump_only++;
3056 			break;
3057 		case 'd':
3058 			debug++;
3059 			break;
3060 		case 'h':
3061 		default:
3062 			help();
3063 			exit(1);
3064 		case 'i':
3065 			interval_sec = atoi(optarg);
3066 			break;
3067 		case 'J':
3068 			rapl_joules++;
3069 			break;
3070 		case 'M':
3071 			sscanf(optarg, "%x", &extra_msr_offset64);
3072 			break;
3073 		case 'm':
3074 			sscanf(optarg, "%x", &extra_msr_offset32);
3075 			break;
3076 		case 'P':
3077 			show_pkg_only++;
3078 			break;
3079 		case 'p':
3080 			show_core_only++;
3081 			break;
3082 		case 'S':
3083 			summary_only++;
3084 			break;
3085 		case 'T':
3086 			tcc_activation_temp_override = atoi(optarg);
3087 			break;
3088 		case 'v':
3089 			print_version();
3090 			exit(0);
3091 			break;
3092 		}
3093 	}
3094 }
3095 
3096 int main(int argc, char **argv)
3097 {
3098 	cmdline(argc, argv);
3099 
3100 	if (debug)
3101 		print_version();
3102 
3103 	turbostat_init();
3104 
3105 	/* dump counters and exit */
3106 	if (dump_only)
3107 		return get_and_dump_counters();
3108 
3109 	/*
3110 	 * if any params left, it must be a command to fork
3111 	 */
3112 	if (argc - optind)
3113 		return fork_it(argv + optind);
3114 	else
3115 		turbostat_loop();
3116 
3117 	return 0;
3118 }
3119