xref: /openbmc/linux/tools/power/x86/turbostat/turbostat.c (revision efdbd7345f8836f7495f3ac6ee237d86cb3bb6b0)
1 /*
2  * turbostat -- show CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors.
4  *
5  * Copyright (c) 2013 Intel Corporation.
6  * Len Brown <len.brown@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 #define _GNU_SOURCE
23 #include MSRHEADER
24 #include <stdarg.h>
25 #include <stdio.h>
26 #include <err.h>
27 #include <unistd.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <sys/stat.h>
31 #include <sys/resource.h>
32 #include <fcntl.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <stdlib.h>
36 #include <getopt.h>
37 #include <dirent.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <sched.h>
41 #include <cpuid.h>
42 #include <linux/capability.h>
43 #include <errno.h>
44 
45 char *proc_stat = "/proc/stat";
46 unsigned int interval_sec = 5;
47 unsigned int debug;
48 unsigned int rapl_joules;
49 unsigned int summary_only;
50 unsigned int dump_only;
51 unsigned int skip_c0;
52 unsigned int skip_c1;
53 unsigned int do_nhm_cstates;
54 unsigned int do_snb_cstates;
55 unsigned int do_knl_cstates;
56 unsigned int do_pc2;
57 unsigned int do_pc3;
58 unsigned int do_pc6;
59 unsigned int do_pc7;
60 unsigned int do_c8_c9_c10;
61 unsigned int do_skl_residency;
62 unsigned int do_slm_cstates;
63 unsigned int use_c1_residency_msr;
64 unsigned int has_aperf;
65 unsigned int has_epb;
66 unsigned int units = 1000000;	/* MHz etc */
67 unsigned int genuine_intel;
68 unsigned int has_invariant_tsc;
69 unsigned int do_nhm_platform_info;
70 unsigned int extra_msr_offset32;
71 unsigned int extra_msr_offset64;
72 unsigned int extra_delta_offset32;
73 unsigned int extra_delta_offset64;
74 int do_smi;
75 double bclk;
76 unsigned int show_pkg;
77 unsigned int show_core;
78 unsigned int show_cpu;
79 unsigned int show_pkg_only;
80 unsigned int show_core_only;
81 char *output_buffer, *outp;
82 unsigned int do_rapl;
83 unsigned int do_dts;
84 unsigned int do_ptm;
85 unsigned int tcc_activation_temp;
86 unsigned int tcc_activation_temp_override;
87 double rapl_power_units, rapl_time_units;
88 double rapl_dram_energy_units, rapl_energy_units;
89 double rapl_joule_counter_range;
90 unsigned int do_core_perf_limit_reasons;
91 unsigned int do_gfx_perf_limit_reasons;
92 unsigned int do_ring_perf_limit_reasons;
93 unsigned int crystal_hz;
94 unsigned long long tsc_hz;
95 int base_cpu;
96 
97 #define RAPL_PKG		(1 << 0)
98 					/* 0x610 MSR_PKG_POWER_LIMIT */
99 					/* 0x611 MSR_PKG_ENERGY_STATUS */
100 #define RAPL_PKG_PERF_STATUS	(1 << 1)
101 					/* 0x613 MSR_PKG_PERF_STATUS */
102 #define RAPL_PKG_POWER_INFO	(1 << 2)
103 					/* 0x614 MSR_PKG_POWER_INFO */
104 
105 #define RAPL_DRAM		(1 << 3)
106 					/* 0x618 MSR_DRAM_POWER_LIMIT */
107 					/* 0x619 MSR_DRAM_ENERGY_STATUS */
108 #define RAPL_DRAM_PERF_STATUS	(1 << 4)
109 					/* 0x61b MSR_DRAM_PERF_STATUS */
110 #define RAPL_DRAM_POWER_INFO	(1 << 5)
111 					/* 0x61c MSR_DRAM_POWER_INFO */
112 
113 #define RAPL_CORES		(1 << 6)
114 					/* 0x638 MSR_PP0_POWER_LIMIT */
115 					/* 0x639 MSR_PP0_ENERGY_STATUS */
116 #define RAPL_CORE_POLICY	(1 << 7)
117 					/* 0x63a MSR_PP0_POLICY */
118 
119 #define RAPL_GFX		(1 << 8)
120 					/* 0x640 MSR_PP1_POWER_LIMIT */
121 					/* 0x641 MSR_PP1_ENERGY_STATUS */
122 					/* 0x642 MSR_PP1_POLICY */
123 #define	TJMAX_DEFAULT	100
124 
125 #define MAX(a, b) ((a) > (b) ? (a) : (b))
126 
127 int aperf_mperf_unstable;
128 int backwards_count;
129 char *progname;
130 
131 cpu_set_t *cpu_present_set, *cpu_affinity_set;
132 size_t cpu_present_setsize, cpu_affinity_setsize;
133 
134 struct thread_data {
135 	unsigned long long tsc;
136 	unsigned long long aperf;
137 	unsigned long long mperf;
138 	unsigned long long c1;
139 	unsigned long long extra_msr64;
140 	unsigned long long extra_delta64;
141 	unsigned long long extra_msr32;
142 	unsigned long long extra_delta32;
143 	unsigned int smi_count;
144 	unsigned int cpu_id;
145 	unsigned int flags;
146 #define CPU_IS_FIRST_THREAD_IN_CORE	0x2
147 #define CPU_IS_FIRST_CORE_IN_PACKAGE	0x4
148 } *thread_even, *thread_odd;
149 
150 struct core_data {
151 	unsigned long long c3;
152 	unsigned long long c6;
153 	unsigned long long c7;
154 	unsigned int core_temp_c;
155 	unsigned int core_id;
156 } *core_even, *core_odd;
157 
158 struct pkg_data {
159 	unsigned long long pc2;
160 	unsigned long long pc3;
161 	unsigned long long pc6;
162 	unsigned long long pc7;
163 	unsigned long long pc8;
164 	unsigned long long pc9;
165 	unsigned long long pc10;
166 	unsigned long long pkg_wtd_core_c0;
167 	unsigned long long pkg_any_core_c0;
168 	unsigned long long pkg_any_gfxe_c0;
169 	unsigned long long pkg_both_core_gfxe_c0;
170 	unsigned int package_id;
171 	unsigned int energy_pkg;	/* MSR_PKG_ENERGY_STATUS */
172 	unsigned int energy_dram;	/* MSR_DRAM_ENERGY_STATUS */
173 	unsigned int energy_cores;	/* MSR_PP0_ENERGY_STATUS */
174 	unsigned int energy_gfx;	/* MSR_PP1_ENERGY_STATUS */
175 	unsigned int rapl_pkg_perf_status;	/* MSR_PKG_PERF_STATUS */
176 	unsigned int rapl_dram_perf_status;	/* MSR_DRAM_PERF_STATUS */
177 	unsigned int pkg_temp_c;
178 
179 } *package_even, *package_odd;
180 
181 #define ODD_COUNTERS thread_odd, core_odd, package_odd
182 #define EVEN_COUNTERS thread_even, core_even, package_even
183 
184 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
185 	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
186 		topo.num_threads_per_core + \
187 		(core_no) * topo.num_threads_per_core + (thread_no))
188 #define GET_CORE(core_base, core_no, pkg_no) \
189 	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
190 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
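/*
 * Illustration (hypothetical topology, not from the original source):
 * with 2 packages x 4 cores/pkg x 2 threads/core, the flat thread array
 * is indexed package-major, so
 *
 *	GET_THREAD(base, 1, 3, 1) == base + 1*4*2 + 3*2 + 1 == base + 15
 *	GET_CORE(base, 3, 1)      == base + 1*4 + 3         == base + 7
 */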
191 
192 struct system_summary {
193 	struct thread_data threads;
194 	struct core_data cores;
195 	struct pkg_data packages;
196 } sum, average;
197 
198 
199 struct topo_params {
200 	int num_packages;
201 	int num_cpus;
202 	int num_cores;
203 	int max_cpu_num;
204 	int num_cores_per_pkg;
205 	int num_threads_per_core;
206 } topo;
207 
208 struct timeval tv_even, tv_odd, tv_delta;
209 
210 void setup_all_buffers(void);
211 
212 int cpu_is_not_present(int cpu)
213 {
214 	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
215 }
216 /*
217  * run func(thread, core, package) in topology order
218  * skip non-present cpus
219  */
220 
221 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
222 	struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
223 {
224 	int retval, pkg_no, core_no, thread_no;
225 
226 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
227 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
228 			for (thread_no = 0; thread_no <
229 				topo.num_threads_per_core; ++thread_no) {
230 				struct thread_data *t;
231 				struct core_data *c;
232 				struct pkg_data *p;
233 
234 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
235 
236 				if (cpu_is_not_present(t->cpu_id))
237 					continue;
238 
239 				c = GET_CORE(core_base, core_no, pkg_no);
240 				p = GET_PKG(pkg_base, pkg_no);
241 
242 				retval = func(t, c, p);
243 				if (retval)
244 					return retval;
245 			}
246 		}
247 	}
248 	return 0;
249 }
250 
251 int cpu_migrate(int cpu)
252 {
253 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
254 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
255 	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
256 		return -1;
257 	else
258 		return 0;
259 }
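/*
 * Illustration of the intended call pattern (mirrors get_counters() below):
 * pin to the target CPU first so that the subsequent rdtsc() runs there;
 * the MSR reads themselves go through /dev/cpu/<cpu>/msr either way.
 *
 *	if (cpu_migrate(cpu))
 *		return -1;		(CPU likely went offline)
 *	t->tsc = rdtsc();		(now the TSC of 'cpu')
 */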
260 
261 int get_msr(int cpu, off_t offset, unsigned long long *msr)
262 {
263 	ssize_t retval;
264 	char pathname[32];
265 	int fd;
266 
267 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
268 	fd = open(pathname, O_RDONLY);
269 	if (fd < 0)
270 		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
271 
272 	retval = pread(fd, msr, sizeof *msr, offset);
273 	close(fd);
274 
275 	if (retval != sizeof *msr)
276 		err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
277 
278 	return 0;
279 }
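/*
 * Illustration (hypothetical call): a 64-bit MSR read is a pread() at the
 * MSR address on the per-CPU msr device node, e.g.
 *
 *	unsigned long long aperf;
 *	get_msr(0, MSR_IA32_APERF, &aperf);	(pread of 8 bytes at offset 0xE8)
 *
 * Because the file offset doubles as the MSR number, the "extra" MSR and
 * counter columns can pass an arbitrary offset straight through.
 */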
280 
281 /*
282  * Example Format w/ field column widths:
283  *
284  *  Package    Core     CPU Avg_MHz Bzy_MHz TSC_MHz     SMI   %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
285  * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
286  */
287 
288 void print_header(void)
289 {
290 	if (show_pkg)
291 		outp += sprintf(outp, " Package");
292 	if (show_core)
293 		outp += sprintf(outp, "    Core");
294 	if (show_cpu)
295 		outp += sprintf(outp, "     CPU");
296 	if (has_aperf)
297 		outp += sprintf(outp, " Avg_MHz");
298 	if (has_aperf)
299 		outp += sprintf(outp, "   %%Busy");
300 	if (has_aperf)
301 		outp += sprintf(outp, " Bzy_MHz");
302 	outp += sprintf(outp, " TSC_MHz");
303 
304 	if (extra_delta_offset32)
305 		outp += sprintf(outp, "  count 0x%03X", extra_delta_offset32);
306 	if (extra_delta_offset64)
307 		outp += sprintf(outp, "  COUNT 0x%03X", extra_delta_offset64);
308 	if (extra_msr_offset32)
309 		outp += sprintf(outp, "   MSR 0x%03X", extra_msr_offset32);
310 	if (extra_msr_offset64)
311 		outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
312 
313 	if (!debug)
314 		goto done;
315 
316 	if (do_smi)
317 		outp += sprintf(outp, "     SMI");
318 
319 	if (do_nhm_cstates)
320 		outp += sprintf(outp, "  CPU%%c1");
321 	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
322 		outp += sprintf(outp, "  CPU%%c3");
323 	if (do_nhm_cstates)
324 		outp += sprintf(outp, "  CPU%%c6");
325 	if (do_snb_cstates)
326 		outp += sprintf(outp, "  CPU%%c7");
327 
328 	if (do_dts)
329 		outp += sprintf(outp, " CoreTmp");
330 	if (do_ptm)
331 		outp += sprintf(outp, "  PkgTmp");
332 
333 	if (do_skl_residency) {
334 		outp += sprintf(outp, " Totl%%C0");
335 		outp += sprintf(outp, "  Any%%C0");
336 		outp += sprintf(outp, "  GFX%%C0");
337 		outp += sprintf(outp, " CPUGFX%%");
338 	}
339 
340 	if (do_pc2)
341 		outp += sprintf(outp, " Pkg%%pc2");
342 	if (do_pc3)
343 		outp += sprintf(outp, " Pkg%%pc3");
344 	if (do_pc6)
345 		outp += sprintf(outp, " Pkg%%pc6");
346 	if (do_pc7)
347 		outp += sprintf(outp, " Pkg%%pc7");
348 	if (do_c8_c9_c10) {
349 		outp += sprintf(outp, " Pkg%%pc8");
350 		outp += sprintf(outp, " Pkg%%pc9");
351 		outp += sprintf(outp, " Pk%%pc10");
352 	}
353 
354 	if (do_rapl && !rapl_joules) {
355 		if (do_rapl & RAPL_PKG)
356 			outp += sprintf(outp, " PkgWatt");
357 		if (do_rapl & RAPL_CORES)
358 			outp += sprintf(outp, " CorWatt");
359 		if (do_rapl & RAPL_GFX)
360 			outp += sprintf(outp, " GFXWatt");
361 		if (do_rapl & RAPL_DRAM)
362 			outp += sprintf(outp, " RAMWatt");
363 		if (do_rapl & RAPL_PKG_PERF_STATUS)
364 			outp += sprintf(outp, "   PKG_%%");
365 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
366 			outp += sprintf(outp, "   RAM_%%");
367 	} else if (do_rapl && rapl_joules) {
368 		if (do_rapl & RAPL_PKG)
369 			outp += sprintf(outp, "   Pkg_J");
370 		if (do_rapl & RAPL_CORES)
371 			outp += sprintf(outp, "   Cor_J");
372 		if (do_rapl & RAPL_GFX)
373 			outp += sprintf(outp, "   GFX_J");
374 		if (do_rapl & RAPL_DRAM)
375 			outp += sprintf(outp, "   RAM_J");
376 		if (do_rapl & RAPL_PKG_PERF_STATUS)
377 			outp += sprintf(outp, "   PKG_%%");
378 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
379 			outp += sprintf(outp, "   RAM_%%");
380 		outp += sprintf(outp, "   time");
381 
382 	}
383     done:
384 	outp += sprintf(outp, "\n");
385 }
386 
387 int dump_counters(struct thread_data *t, struct core_data *c,
388 	struct pkg_data *p)
389 {
390 	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
391 
392 	if (t) {
393 		outp += sprintf(outp, "CPU: %d flags 0x%x\n",
394 			t->cpu_id, t->flags);
395 		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
396 		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
397 		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
398 		outp += sprintf(outp, "c1: %016llX\n", t->c1);
399 		outp += sprintf(outp, "msr0x%x: %08llX\n",
400 			extra_delta_offset32, t->extra_delta32);
401 		outp += sprintf(outp, "msr0x%x: %016llX\n",
402 			extra_delta_offset64, t->extra_delta64);
403 		outp += sprintf(outp, "msr0x%x: %08llX\n",
404 			extra_msr_offset32, t->extra_msr32);
405 		outp += sprintf(outp, "msr0x%x: %016llX\n",
406 			extra_msr_offset64, t->extra_msr64);
407 		if (do_smi)
408 			outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
409 	}
410 
411 	if (c) {
412 		outp += sprintf(outp, "core: %d\n", c->core_id);
413 		outp += sprintf(outp, "c3: %016llX\n", c->c3);
414 		outp += sprintf(outp, "c6: %016llX\n", c->c6);
415 		outp += sprintf(outp, "c7: %016llX\n", c->c7);
416 		outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
417 	}
418 
419 	if (p) {
420 		outp += sprintf(outp, "package: %d\n", p->package_id);
421 
422 		outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
423 		outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
424 		outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
425 		outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
426 
427 		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
428 		if (do_pc3)
429 			outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
430 		if (do_pc6)
431 			outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
432 		if (do_pc7)
433 			outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
434 		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
435 		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
436 		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
437 		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
438 		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
439 		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
440 		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
441 		outp += sprintf(outp, "Throttle PKG: %0X\n",
442 			p->rapl_pkg_perf_status);
443 		outp += sprintf(outp, "Throttle RAM: %0X\n",
444 			p->rapl_dram_perf_status);
445 		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
446 	}
447 
448 	outp += sprintf(outp, "\n");
449 
450 	return 0;
451 }
452 
453 /*
454  * column formatting convention & formats
455  */
456 int format_counters(struct thread_data *t, struct core_data *c,
457 	struct pkg_data *p)
458 {
459 	double interval_float;
460 	char *fmt8;
461 
462 	 /* if showing only 1st thread in core and this isn't one, bail out */
463 	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
464 		return 0;
465 
466 	 /* if showing only 1st thread in pkg and this isn't one, bail out */
467 	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
468 		return 0;
469 
470 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
471 
472 	/* topo columns, print blanks on 1st (average) line */
473 	if (t == &average.threads) {
474 		if (show_pkg)
475 			outp += sprintf(outp, "       -");
476 		if (show_core)
477 			outp += sprintf(outp, "       -");
478 		if (show_cpu)
479 			outp += sprintf(outp, "       -");
480 	} else {
481 		if (show_pkg) {
482 			if (p)
483 				outp += sprintf(outp, "%8d", p->package_id);
484 			else
485 				outp += sprintf(outp, "       -");
486 		}
487 		if (show_core) {
488 			if (c)
489 				outp += sprintf(outp, "%8d", c->core_id);
490 			else
491 				outp += sprintf(outp, "       -");
492 		}
493 		if (show_cpu)
494 			outp += sprintf(outp, "%8d", t->cpu_id);
495 	}
496 
497 	/* Avg_MHz */
498 	if (has_aperf)
499 		outp += sprintf(outp, "%8.0f",
500 			1.0 / units * t->aperf / interval_float);
501 
502 	/* %Busy */
503 	if (has_aperf) {
504 		if (!skip_c0)
505 			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
506 		else
507 			outp += sprintf(outp, "********");
508 	}
509 
510 	/* Bzy_MHz */
511 	if (has_aperf)
512 		outp += sprintf(outp, "%8.0f",
513 			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
514 
515 	/* TSC_MHz */
516 	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
517 
518 	/* delta */
519 	if (extra_delta_offset32)
520 		outp += sprintf(outp, "  %11llu", t->extra_delta32);
521 
522 	/* DELTA */
523 	if (extra_delta_offset64)
524 		outp += sprintf(outp, "  %11llu", t->extra_delta64);
525 	/* msr */
526 	if (extra_msr_offset32)
527 		outp += sprintf(outp, "  0x%08llx", t->extra_msr32);
528 
529 	/* MSR */
530 	if (extra_msr_offset64)
531 		outp += sprintf(outp, "  0x%016llx", t->extra_msr64);
532 
533 	if (!debug)
534 		goto done;
535 
536 	/* SMI */
537 	if (do_smi)
538 		outp += sprintf(outp, "%8d", t->smi_count);
539 
540 	if (do_nhm_cstates) {
541 		if (!skip_c1)
542 			outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
543 		else
544 			outp += sprintf(outp, "********");
545 	}
546 
547 	/* print per-core data only for 1st thread in core */
548 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
549 		goto done;
550 
551 	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
552 		outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
553 	if (do_nhm_cstates)
554 		outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
555 	if (do_snb_cstates)
556 		outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
557 
558 	if (do_dts)
559 		outp += sprintf(outp, "%8d", c->core_temp_c);
560 
561 	/* print per-package data only for 1st core in package */
562 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
563 		goto done;
564 
565 	/* PkgTmp */
566 	if (do_ptm)
567 		outp += sprintf(outp, "%8d", p->pkg_temp_c);
568 
569 	/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
570 	if (do_skl_residency) {
571 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
572 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
573 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
574 		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
575 	}
576 
577 	if (do_pc2)
578 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
579 	if (do_pc3)
580 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
581 	if (do_pc6)
582 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
583 	if (do_pc7)
584 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
585 	if (do_c8_c9_c10) {
586 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
587 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
588 		outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
589 	}
590 
591 	/*
592  	 * If measurement interval exceeds minimum RAPL Joule Counter range,
593  	 * indicate that results are suspect by printing "**" in fraction place.
594  	 */
595 	if (interval_float < rapl_joule_counter_range)
596 		fmt8 = "%8.2f";
597 	else
598 		fmt8 = " %6.0f**";
599 
600 	if (do_rapl && !rapl_joules) {
601 		if (do_rapl & RAPL_PKG)
602 			outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
603 		if (do_rapl & RAPL_CORES)
604 			outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
605 		if (do_rapl & RAPL_GFX)
606 			outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
607 		if (do_rapl & RAPL_DRAM)
608 			outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
609 		if (do_rapl & RAPL_PKG_PERF_STATUS)
610 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
611 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
612 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
613 	} else if (do_rapl && rapl_joules) {
614 		if (do_rapl & RAPL_PKG)
615 			outp += sprintf(outp, fmt8,
616 					p->energy_pkg * rapl_energy_units);
617 		if (do_rapl & RAPL_CORES)
618 			outp += sprintf(outp, fmt8,
619 					p->energy_cores * rapl_energy_units);
620 		if (do_rapl & RAPL_GFX)
621 			outp += sprintf(outp, fmt8,
622 					p->energy_gfx * rapl_energy_units);
623 		if (do_rapl & RAPL_DRAM)
624 			outp += sprintf(outp, fmt8,
625 					p->energy_dram * rapl_dram_energy_units);
626 		if (do_rapl & RAPL_PKG_PERF_STATUS)
627 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
628 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
629 			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
630 
631 		outp += sprintf(outp, fmt8, interval_float);
632 	}
633 done:
634 	outp += sprintf(outp, "\n");
635 
636 	return 0;
637 }
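/*
 * Derivations used above, as coded (no additional behavior implied):
 *
 *	Avg_MHz = aperf / units / interval
 *	%Busy   = 100 * mperf / tsc
 *	Bzy_MHz = (tsc / units) * (aperf / mperf) / interval
 *	TSC_MHz = tsc / units / interval
 *
 * so Avg_MHz == %Busy/100 * Bzy_MHz, modulo rounding.
 */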
638 
639 void flush_stdout()
640 {
641 	fputs(output_buffer, stdout);
642 	fflush(stdout);
643 	outp = output_buffer;
644 }
645 void flush_stderr()
646 {
647 	fputs(output_buffer, stderr);
648 	outp = output_buffer;
649 }
650 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
651 {
652 	static int printed;
653 
654 	if (!printed || !summary_only)
655 		print_header();
656 
657 	if (topo.num_cpus > 1)
658 		format_counters(&average.threads, &average.cores,
659 			&average.packages);
660 
661 	printed = 1;
662 
663 	if (summary_only)
664 		return;
665 
666 	for_all_cpus(format_counters, t, c, p);
667 }
668 
669 #define DELTA_WRAP32(new, old)			\
670 	if (new > old) {			\
671 		old = new - old;		\
672 	} else {				\
673 		old = 0x100000000 + new - old;	\
674 	}
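/*
 * Worked example of the wrap handling (hypothetical 32-bit energy readings):
 * old = 0xFFFFFFF0 and new = 0x00000010 means the counter rolled over, so
 * the stored delta is 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20 counts rather
 * than a bogus negative value.
 */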
675 
676 void
677 delta_package(struct pkg_data *new, struct pkg_data *old)
678 {
679 
680 	if (do_skl_residency) {
681 		old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
682 		old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
683 		old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
684 		old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
685 	}
686 	old->pc2 = new->pc2 - old->pc2;
687 	if (do_pc3)
688 		old->pc3 = new->pc3 - old->pc3;
689 	if (do_pc6)
690 		old->pc6 = new->pc6 - old->pc6;
691 	if (do_pc7)
692 		old->pc7 = new->pc7 - old->pc7;
693 	old->pc8 = new->pc8 - old->pc8;
694 	old->pc9 = new->pc9 - old->pc9;
695 	old->pc10 = new->pc10 - old->pc10;
696 	old->pkg_temp_c = new->pkg_temp_c;
697 
698 	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
699 	DELTA_WRAP32(new->energy_cores, old->energy_cores);
700 	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
701 	DELTA_WRAP32(new->energy_dram, old->energy_dram);
702 	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
703 	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
704 }
705 
706 void
707 delta_core(struct core_data *new, struct core_data *old)
708 {
709 	old->c3 = new->c3 - old->c3;
710 	old->c6 = new->c6 - old->c6;
711 	old->c7 = new->c7 - old->c7;
712 	old->core_temp_c = new->core_temp_c;
713 }
714 
715 /*
716  * old = new - old
717  */
718 void
719 delta_thread(struct thread_data *new, struct thread_data *old,
720 	struct core_data *core_delta)
721 {
722 	old->tsc = new->tsc - old->tsc;
723 
724 	/* check for TSC < 1 Mcycles over interval */
725 	if (old->tsc < (1000 * 1000))
726 		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
727 		     "You can disable all c-states by booting with \"idle=poll\"\n"
728 		     "or just the deep ones with \"processor.max_cstate=1\"");
729 
730 	old->c1 = new->c1 - old->c1;
731 
732 	if (has_aperf) {
733 		if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
734 			old->aperf = new->aperf - old->aperf;
735 			old->mperf = new->mperf - old->mperf;
736 		} else {
737 
738 			if (!aperf_mperf_unstable) {
739 				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
740 				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
741 				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
742 
743 				aperf_mperf_unstable = 1;
744 			}
745 			/*
746 			 * mperf delta is likely a huge "positive" number
747 			 * can not use it for calculating c0 time
748 			 */
749 			skip_c0 = 1;
750 			skip_c1 = 1;
751 		}
752 	}
753 
754 
755 	if (use_c1_residency_msr) {
756 		/*
757 		 * Some models have a dedicated C1 residency MSR,
758 		 * which should be more accurate than the derivation below.
759 		 */
760 	} else {
761 		/*
762 		 * As counter collection is not atomic,
763 		 * it is possible for mperf's non-halted cycles + idle states
764 		 * to exceed TSC's all cycles: show c1 = 0% in that case.
765 		 */
766 		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
767 			old->c1 = 0;
768 		else {
769 			/* normal case, derive c1 */
770 			old->c1 = old->tsc - old->mperf - core_delta->c3
771 				- core_delta->c6 - core_delta->c7;
772 		}
773 	}
774 
775 	if (old->mperf == 0) {
776 		if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
777 		old->mperf = 1;	/* divide by 0 protection */
778 	}
779 
780 	old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
781 	old->extra_delta32 &= 0xFFFFFFFF;
782 
783 	old->extra_delta64 = new->extra_delta64 - old->extra_delta64;
784 
785 	/*
786 	 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
787 	 */
788 	old->extra_msr32 = new->extra_msr32;
789 	old->extra_msr64 = new->extra_msr64;
790 
791 	if (do_smi)
792 		old->smi_count = new->smi_count - old->smi_count;
793 }
794 
795 int delta_cpu(struct thread_data *t, struct core_data *c,
796 	struct pkg_data *p, struct thread_data *t2,
797 	struct core_data *c2, struct pkg_data *p2)
798 {
799 	/* calculate core delta only for 1st thread in core */
800 	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
801 		delta_core(c, c2);
802 
803 	/* always calculate thread delta */
804 	delta_thread(t, t2, c2);	/* c2 is core delta */
805 
806 	/* calculate package delta only for 1st core in package */
807 	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
808 		delta_package(p, p2);
809 
810 	return 0;
811 }
812 
813 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
814 {
815 	t->tsc = 0;
816 	t->aperf = 0;
817 	t->mperf = 0;
818 	t->c1 = 0;
819 
820 	t->smi_count = 0;
821 	t->extra_delta32 = 0;
822 	t->extra_delta64 = 0;
823 
824 	/* tells format_counters to dump all fields from this set */
825 	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
826 
827 	c->c3 = 0;
828 	c->c6 = 0;
829 	c->c7 = 0;
830 	c->core_temp_c = 0;
831 
832 	p->pkg_wtd_core_c0 = 0;
833 	p->pkg_any_core_c0 = 0;
834 	p->pkg_any_gfxe_c0 = 0;
835 	p->pkg_both_core_gfxe_c0 = 0;
836 
837 	p->pc2 = 0;
838 	if (do_pc3)
839 		p->pc3 = 0;
840 	if (do_pc6)
841 		p->pc6 = 0;
842 	if (do_pc7)
843 		p->pc7 = 0;
844 	p->pc8 = 0;
845 	p->pc9 = 0;
846 	p->pc10 = 0;
847 
848 	p->energy_pkg = 0;
849 	p->energy_dram = 0;
850 	p->energy_cores = 0;
851 	p->energy_gfx = 0;
852 	p->rapl_pkg_perf_status = 0;
853 	p->rapl_dram_perf_status = 0;
854 	p->pkg_temp_c = 0;
855 }
856 int sum_counters(struct thread_data *t, struct core_data *c,
857 	struct pkg_data *p)
858 {
859 	average.threads.tsc += t->tsc;
860 	average.threads.aperf += t->aperf;
861 	average.threads.mperf += t->mperf;
862 	average.threads.c1 += t->c1;
863 
864 	average.threads.extra_delta32 += t->extra_delta32;
865 	average.threads.extra_delta64 += t->extra_delta64;
866 
867 	/* sum per-core values only for 1st thread in core */
868 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
869 		return 0;
870 
871 	average.cores.c3 += c->c3;
872 	average.cores.c6 += c->c6;
873 	average.cores.c7 += c->c7;
874 
875 	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
876 
877 	/* sum per-pkg values only for 1st core in pkg */
878 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
879 		return 0;
880 
881 	if (do_skl_residency) {
882 		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
883 		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
884 		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
885 		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
886 	}
887 
888 	average.packages.pc2 += p->pc2;
889 	if (do_pc3)
890 		average.packages.pc3 += p->pc3;
891 	if (do_pc6)
892 		average.packages.pc6 += p->pc6;
893 	if (do_pc7)
894 		average.packages.pc7 += p->pc7;
895 	average.packages.pc8 += p->pc8;
896 	average.packages.pc9 += p->pc9;
897 	average.packages.pc10 += p->pc10;
898 
899 	average.packages.energy_pkg += p->energy_pkg;
900 	average.packages.energy_dram += p->energy_dram;
901 	average.packages.energy_cores += p->energy_cores;
902 	average.packages.energy_gfx += p->energy_gfx;
903 
904 	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
905 
906 	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
907 	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
908 	return 0;
909 }
910 /*
911  * sum the counters for all cpus in the system
912  * compute the weighted average
913  */
914 void compute_average(struct thread_data *t, struct core_data *c,
915 	struct pkg_data *p)
916 {
917 	clear_counters(&average.threads, &average.cores, &average.packages);
918 
919 	for_all_cpus(sum_counters, t, c, p);
920 
921 	average.threads.tsc /= topo.num_cpus;
922 	average.threads.aperf /= topo.num_cpus;
923 	average.threads.mperf /= topo.num_cpus;
924 	average.threads.c1 /= topo.num_cpus;
925 
926 	average.threads.extra_delta32 /= topo.num_cpus;
927 	average.threads.extra_delta32 &= 0xFFFFFFFF;
928 
929 	average.threads.extra_delta64 /= topo.num_cpus;
930 
931 	average.cores.c3 /= topo.num_cores;
932 	average.cores.c6 /= topo.num_cores;
933 	average.cores.c7 /= topo.num_cores;
934 
935 	if (do_skl_residency) {
936 		average.packages.pkg_wtd_core_c0 /= topo.num_packages;
937 		average.packages.pkg_any_core_c0 /= topo.num_packages;
938 		average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
939 		average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
940 	}
941 
942 	average.packages.pc2 /= topo.num_packages;
943 	if (do_pc3)
944 		average.packages.pc3 /= topo.num_packages;
945 	if (do_pc6)
946 		average.packages.pc6 /= topo.num_packages;
947 	if (do_pc7)
948 		average.packages.pc7 /= topo.num_packages;
949 
950 	average.packages.pc8 /= topo.num_packages;
951 	average.packages.pc9 /= topo.num_packages;
952 	average.packages.pc10 /= topo.num_packages;
953 }
954 
955 static unsigned long long rdtsc(void)
956 {
957 	unsigned int low, high;
958 
959 	asm volatile("rdtsc" : "=a" (low), "=d" (high));
960 
961 	return low | ((unsigned long long)high) << 32;
962 }
963 
964 
965 /*
966  * get_counters(...)
967  * migrate to cpu
968  * acquire and record local counters for that cpu
969  */
970 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
971 {
972 	int cpu = t->cpu_id;
973 	unsigned long long msr;
974 
975 	if (cpu_migrate(cpu)) {
976 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
977 		return -1;
978 	}
979 
980 	t->tsc = rdtsc();	/* we are running on local CPU of interest */
981 
982 	if (has_aperf) {
983 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
984 			return -3;
985 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
986 			return -4;
987 	}
988 
989 	if (do_smi) {
990 		if (get_msr(cpu, MSR_SMI_COUNT, &msr))
991 			return -5;
992 		t->smi_count = msr & 0xFFFFFFFF;
993 	}
994 	if (extra_delta_offset32) {
995 		if (get_msr(cpu, extra_delta_offset32, &msr))
996 			return -5;
997 		t->extra_delta32 = msr & 0xFFFFFFFF;
998 	}
999 
1000 	if (extra_delta_offset64)
1001 		if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
1002 			return -5;
1003 
1004 	if (extra_msr_offset32) {
1005 		if (get_msr(cpu, extra_msr_offset32, &msr))
1006 			return -5;
1007 		t->extra_msr32 = msr & 0xFFFFFFFF;
1008 	}
1009 
1010 	if (extra_msr_offset64)
1011 		if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
1012 			return -5;
1013 
1014 	if (use_c1_residency_msr) {
1015 		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1016 			return -6;
1017 	}
1018 
1019 	/* collect core counters only for 1st thread in core */
1020 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1021 		return 0;
1022 
1023 	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
1024 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1025 			return -6;
1026 	}
1027 
1028 	if (do_nhm_cstates && !do_knl_cstates) {
1029 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1030 			return -7;
1031 	} else if (do_knl_cstates) {
1032 		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1033 			return -7;
1034 	}
1035 
1036 	if (do_snb_cstates)
1037 		if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1038 			return -8;
1039 
1040 	if (do_dts) {
1041 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1042 			return -9;
1043 		c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1044 	}
1045 
1046 
1047 	/* collect package counters only for 1st core in package */
1048 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1049 		return 0;
1050 
1051 	if (do_skl_residency) {
1052 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1053 			return -10;
1054 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1055 			return -11;
1056 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1057 			return -12;
1058 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1059 			return -13;
1060 	}
1061 	if (do_pc3)
1062 		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1063 			return -9;
1064 	if (do_pc6)
1065 		if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1066 			return -10;
1067 	if (do_pc2)
1068 		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1069 			return -11;
1070 	if (do_pc7)
1071 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1072 			return -12;
1073 	if (do_c8_c9_c10) {
1074 		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1075 			return -13;
1076 		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1077 			return -13;
1078 		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1079 			return -13;
1080 	}
1081 	if (do_rapl & RAPL_PKG) {
1082 		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1083 			return -13;
1084 		p->energy_pkg = msr & 0xFFFFFFFF;
1085 	}
1086 	if (do_rapl & RAPL_CORES) {
1087 		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1088 			return -14;
1089 		p->energy_cores = msr & 0xFFFFFFFF;
1090 	}
1091 	if (do_rapl & RAPL_DRAM) {
1092 		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1093 			return -15;
1094 		p->energy_dram = msr & 0xFFFFFFFF;
1095 	}
1096 	if (do_rapl & RAPL_GFX) {
1097 		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1098 			return -16;
1099 		p->energy_gfx = msr & 0xFFFFFFFF;
1100 	}
1101 	if (do_rapl & RAPL_PKG_PERF_STATUS) {
1102 		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1103 			return -16;
1104 		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1105 	}
1106 	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1107 		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1108 			return -16;
1109 		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1110 	}
1111 	if (do_ptm) {
1112 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1113 			return -17;
1114 		p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1115 	}
1116 	return 0;
1117 }
1118 
1119 /*
1120  * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1121  * If you change the values, note they are used both in comparisons
1122  * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1123  */
1124 
1125 #define PCLUKN 0 /* Unknown */
1126 #define PCLRSV 1 /* Reserved */
1127 #define PCL__0 2 /* PC0 */
1128 #define PCL__1 3 /* PC1 */
1129 #define PCL__2 4 /* PC2 */
1130 #define PCL__3 5 /* PC3 */
1131 #define PCL__4 6 /* PC4 */
1132 #define PCL__6 7 /* PC6 */
1133 #define PCL_6N 8 /* PC6 No Retention */
1134 #define PCL_6R 9 /* PC6 Retention */
1135 #define PCL__7 10 /* PC7 */
1136 #define PCL_7S 11 /* PC7 Shrink */
1137 #define PCL__8 12 /* PC8 */
1138 #define PCL__9 13 /* PC9 */
1139 #define PCLUNL 14 /* Unlimited */
1140 
1141 int pkg_cstate_limit = PCLUKN;
1142 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
1143 	"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1144 
1145 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1146 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1147 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1148 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1149 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1150 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
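/*
 * Illustration: probe_nhm_msrs() selects one of the tables above for the
 * running model and indexes it with the low nibble of
 * MSR_NHM_SNB_PKG_CST_CFG_CTL.  For example, a limit field of 7 on an
 * SNB-class part gives snb_pkg_cstate_limits[7] == PCLUNL, which
 * dump_nhm_cst_cfg() prints as "unlimited".
 */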
1151 
1152 static void
1153 dump_nhm_platform_info(void)
1154 {
1155 	unsigned long long msr;
1156 	unsigned int ratio;
1157 
1158 	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
1159 
1160 	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1161 
1162 	ratio = (msr >> 40) & 0xFF;
1163 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
1164 		ratio, bclk, ratio * bclk);
1165 
1166 	ratio = (msr >> 8) & 0xFF;
1167 	fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
1168 		ratio, bclk, ratio * bclk);
1169 
1170 	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1171 	fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1172 		base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
1173 
1174 	return;
1175 }
1176 
1177 static void
1178 dump_hsw_turbo_ratio_limits(void)
1179 {
1180 	unsigned long long msr;
1181 	unsigned int ratio;
1182 
1183 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1184 
1185 	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
1186 
1187 	ratio = (msr >> 8) & 0xFF;
1188 	if (ratio)
1189 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
1190 			ratio, bclk, ratio * bclk);
1191 
1192 	ratio = (msr >> 0) & 0xFF;
1193 	if (ratio)
1194 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
1195 			ratio, bclk, ratio * bclk);
1196 	return;
1197 }
1198 
1199 static void
1200 dump_ivt_turbo_ratio_limits(void)
1201 {
1202 	unsigned long long msr;
1203 	unsigned int ratio;
1204 
1205 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1206 
1207 	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
1208 
1209 	ratio = (msr >> 56) & 0xFF;
1210 	if (ratio)
1211 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
1212 			ratio, bclk, ratio * bclk);
1213 
1214 	ratio = (msr >> 48) & 0xFF;
1215 	if (ratio)
1216 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
1217 			ratio, bclk, ratio * bclk);
1218 
1219 	ratio = (msr >> 40) & 0xFF;
1220 	if (ratio)
1221 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
1222 			ratio, bclk, ratio * bclk);
1223 
1224 	ratio = (msr >> 32) & 0xFF;
1225 	if (ratio)
1226 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
1227 			ratio, bclk, ratio * bclk);
1228 
1229 	ratio = (msr >> 24) & 0xFF;
1230 	if (ratio)
1231 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
1232 			ratio, bclk, ratio * bclk);
1233 
1234 	ratio = (msr >> 16) & 0xFF;
1235 	if (ratio)
1236 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
1237 			ratio, bclk, ratio * bclk);
1238 
1239 	ratio = (msr >> 8) & 0xFF;
1240 	if (ratio)
1241 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
1242 			ratio, bclk, ratio * bclk);
1243 
1244 	ratio = (msr >> 0) & 0xFF;
1245 	if (ratio)
1246 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
1247 			ratio, bclk, ratio * bclk);
1248 	return;
1249 }
1250 
1251 static void
1252 dump_nhm_turbo_ratio_limits(void)
1253 {
1254 	unsigned long long msr;
1255 	unsigned int ratio;
1256 
1257 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1258 
1259 	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
1260 
1261 	ratio = (msr >> 56) & 0xFF;
1262 	if (ratio)
1263 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
1264 			ratio, bclk, ratio * bclk);
1265 
1266 	ratio = (msr >> 48) & 0xFF;
1267 	if (ratio)
1268 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
1269 			ratio, bclk, ratio * bclk);
1270 
1271 	ratio = (msr >> 40) & 0xFF;
1272 	if (ratio)
1273 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
1274 			ratio, bclk, ratio * bclk);
1275 
1276 	ratio = (msr >> 32) & 0xFF;
1277 	if (ratio)
1278 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
1279 			ratio, bclk, ratio * bclk);
1280 
1281 	ratio = (msr >> 24) & 0xFF;
1282 	if (ratio)
1283 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
1284 			ratio, bclk, ratio * bclk);
1285 
1286 	ratio = (msr >> 16) & 0xFF;
1287 	if (ratio)
1288 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
1289 			ratio, bclk, ratio * bclk);
1290 
1291 	ratio = (msr >> 8) & 0xFF;
1292 	if (ratio)
1293 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
1294 			ratio, bclk, ratio * bclk);
1295 
1296 	ratio = (msr >> 0) & 0xFF;
1297 	if (ratio)
1298 		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1299 			ratio, bclk, ratio * bclk);
1300 	return;
1301 }
1302 
1303 static void
1304 dump_knl_turbo_ratio_limits(void)
1305 {
1306 	int cores;
1307 	unsigned int ratio;
1308 	unsigned long long msr;
1309 	int delta_cores;
1310 	int delta_ratio;
1311 	int i;
1312 
1313 	get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
1314 
1315 	fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
1316 		base_cpu, msr);
1317 
1318 	/**
1319 	 * Turbo encoding in KNL is as follows:
1320 	 * [7:0] -- Base value of number of active cores of bucket 1.
1321 	 * [15:8] -- Base value of freq ratio of bucket 1.
1322 	 * [20:16] -- +ve delta of number of active cores of bucket 2.
1323 	 * i.e. active cores of bucket 2 =
1324 	 * active cores of bucket 1 + delta
1325 	 * [23:21] -- Negative delta of freq ratio of bucket 2.
1326 	 * i.e. freq ratio of bucket 2 =
1327 	 * freq ratio of bucket 1 - delta
1328 	 * [28:24]-- +ve delta of number of active cores of bucket 3.
1329 	 * [31:29]-- -ve delta of freq ratio of bucket 3.
1330 	 * [36:32]-- +ve delta of number of active cores of bucket 4.
1331 	 * [39:37]-- -ve delta of freq ratio of bucket 4.
1332 	 * [44:40]-- +ve delta of number of active cores of bucket 5.
1333 	 * [47:45]-- -ve delta of freq ratio of bucket 5.
1334 	 * [52:48]-- +ve delta of number of active cores of bucket 6.
1335 	 * [55:53]-- -ve delta of freq ratio of bucket 6.
1336 	 * [60:56]-- +ve delta of number of active cores of bucket 7.
1337 	 * [63:61]-- -ve delta of freq ratio of bucket 7.
1338 	 */
1339 	cores = msr & 0xFF;
1340 	ratio = (msr >> 8) & 0xFF;
1341 	if (ratio > 0)
1342 		fprintf(stderr,
1343 			"%d * %.0f = %.0f MHz max turbo %d active cores\n",
1344 			ratio, bclk, ratio * bclk, cores);
1345 
1346 	for (i = 16; i < 64; i = i + 8) {
1347 		delta_cores = (msr >> i) & 0x1F;
1348 		delta_ratio = (msr >> (i + 5)) & 0x7;
1349 		if (!delta_cores || !delta_ratio)
1350 			return;
1351 		cores = cores + delta_cores;
1352 		ratio = ratio - delta_ratio;
1353 
1354 		/** -ve ratios will make successive ratio calculations
1355 		 * negative. Hence return instead of carrying on.
1356 		 */
1357 		if (ratio > 0)
1358 			fprintf(stderr,
1359 				"%d * %.0f = %.0f MHz max turbo %d active cores\n",
1360 				ratio, bclk, ratio * bclk, cores);
1361 	}
1362 }
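/*
 * Worked example (hypothetical MSR value, assuming a 100 MHz bclk): bits
 * [7:0] = 2 and [15:8] = 30 give bucket 1, "30 * 100 = 3000 MHz max turbo
 * 2 active cores".  Bits [20:16] = 4 and [23:21] = 1 then give bucket 2 as
 * 2 + 4 = 6 active cores at ratio 30 - 1 = 29, i.e. 2900 MHz, and so on
 * until a zero delta terminates the list.
 */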
1363 
1364 static void
1365 dump_nhm_cst_cfg(void)
1366 {
1367 	unsigned long long msr;
1368 
1369 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1370 
1371 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
1372 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
1373 
1374 	fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
1375 
1376 	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
1377 		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
1378 		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
1379 		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
1380 		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
1381 		(msr & (1 << 15)) ? "" : "UN",
1382 		(unsigned int)msr & 7,
1383 		pkg_cstate_limit_strings[pkg_cstate_limit]);
1384 	return;
1385 }
1386 
1387 static void
1388 dump_config_tdp(void)
1389 {
1390 	unsigned long long msr;
1391 
1392 	get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
1393 	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
1394 	fprintf(stderr, " (base_ratio=%d)\n", (unsigned int)msr & 0xEF);
1395 
1396 	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
1397 	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
1398 	if (msr) {
1399 		fprintf(stderr, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
1400 		fprintf(stderr, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
1401 		fprintf(stderr, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
1402 		fprintf(stderr, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0xEFFF);
1403 	}
1404 	fprintf(stderr, ")\n");
1405 
1406 	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
1407 	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
1408 	if (msr) {
1409 		fprintf(stderr, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
1410 		fprintf(stderr, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
1411 		fprintf(stderr, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
1412 		fprintf(stderr, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0xEFFF);
1413 	}
1414 	fprintf(stderr, ")\n");
1415 
1416 	get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
1417 	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
1418 	if ((msr) & 0x3)
1419 		fprintf(stderr, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
1420 	fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
1421 	fprintf(stderr, ")\n");
1422 
1423 	get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
1424 	fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
1425 	fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xEF);
1426 	fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
1427 	fprintf(stderr, ")\n");
1428 }
1429 
1430 void free_all_buffers(void)
1431 {
1432 	CPU_FREE(cpu_present_set);
1433 	cpu_present_set = NULL;
1434 	cpu_present_setsize = 0;
1435 
1436 	CPU_FREE(cpu_affinity_set);
1437 	cpu_affinity_set = NULL;
1438 	cpu_affinity_setsize = 0;
1439 
1440 	free(thread_even);
1441 	free(core_even);
1442 	free(package_even);
1443 
1444 	thread_even = NULL;
1445 	core_even = NULL;
1446 	package_even = NULL;
1447 
1448 	free(thread_odd);
1449 	free(core_odd);
1450 	free(package_odd);
1451 
1452 	thread_odd = NULL;
1453 	core_odd = NULL;
1454 	package_odd = NULL;
1455 
1456 	free(output_buffer);
1457 	output_buffer = NULL;
1458 	outp = NULL;
1459 }
1460 
1461 /*
1462  * Open a file, and exit on failure
1463  */
1464 FILE *fopen_or_die(const char *path, const char *mode)
1465 {
1466 	FILE *filep = fopen(path, mode);
1467 	if (!filep)
1468 		err(1, "%s: open failed", path);
1469 	return filep;
1470 }
1471 
1472 /*
1473  * Parse a file containing a single int.
1474  */
1475 int parse_int_file(const char *fmt, ...)
1476 {
1477 	va_list args;
1478 	char path[PATH_MAX];
1479 	FILE *filep;
1480 	int value;
1481 
1482 	va_start(args, fmt);
1483 	vsnprintf(path, sizeof(path), fmt, args);
1484 	va_end(args);
1485 	filep = fopen_or_die(path, "r");
1486 	if (fscanf(filep, "%d", &value) != 1)
1487 		err(1, "%s: failed to parse number from file", path);
1488 	fclose(filep);
1489 	return value;
1490 }
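/*
 * Usage example (mirrors the topology helpers below):
 *
 *	pkg = parse_int_file(
 *		"/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
 *		cpu);
 *
 * The format string and varargs expand into a sysfs path whose single
 * integer is returned.
 */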
1491 
1492 /*
1493  * get_cpu_position_in_core(cpu)
1494  * return the position of the CPU among its HT siblings in the core
1495  * return -1 if the sibling is not in list
1496  */
1497 int get_cpu_position_in_core(int cpu)
1498 {
1499 	char path[64];
1500 	FILE *filep;
1501 	int this_cpu;
1502 	char character;
1503 	int i;
1504 
1505 	sprintf(path,
1506 		"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
1507 		cpu);
1508 	filep = fopen(path, "r");
1509 	if (filep == NULL) {
1510 		perror(path);
1511 		exit(1);
1512 	}
1513 
1514 	for (i = 0; i < topo.num_threads_per_core; i++) {
1515 		fscanf(filep, "%d", &this_cpu);
1516 		if (this_cpu == cpu) {
1517 			fclose(filep);
1518 			return i;
1519 		}
1520 
1521 		/* Account for no separator after last thread*/
1522 		if (i != (topo.num_threads_per_core - 1))
1523 			fscanf(filep, "%c", &character);
1524 	}
1525 
1526 	fclose(filep);
1527 	return -1;
1528 }
1529 
1530 /*
1531  * cpu_is_first_core_in_package(cpu)
1532  * return 1 if given CPU is 1st core in package
1533  */
1534 int cpu_is_first_core_in_package(int cpu)
1535 {
1536 	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
1537 }
1538 
1539 int get_physical_package_id(int cpu)
1540 {
1541 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
1542 }
1543 
1544 int get_core_id(int cpu)
1545 {
1546 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
1547 }
1548 
1549 int get_num_ht_siblings(int cpu)
1550 {
1551 	char path[80];
1552 	FILE *filep;
1553 	int sib1;
1554 	int matches = 0;
1555 	char character;
1556 	char str[100];
1557 	char *ch;
1558 
1559 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1560 	filep = fopen_or_die(path, "r");
1561 
1562 	/*
1563 	 * file format:
1564 	 * A ',' separated or '-' separated set of numbers
1565 	 * (eg 1-2 or 1,3,4,5)
1566 	 */
1567 	fscanf(filep, "%d%c\n", &sib1, &character);
1568 	fseek(filep, 0, SEEK_SET);
1569 	fgets(str, 100, filep);
1570 	ch = strchr(str, character);
1571 	while (ch != NULL) {
1572 		matches++;
1573 		ch = strchr(ch+1, character);
1574 	}
1575 
1576 	fclose(filep);
1577 	return matches+1;
1578 }
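/*
 * Example input (hypothetical sysfs contents): a thread_siblings_list of
 * "0,4" yields sib1 = 0 and character = ','; one ',' is then found in the
 * line (matches = 1), so the function reports matches + 1 = 2 HT siblings.
 */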
1579 
1580 /*
1581  * run func(thread, core, package) in topology order
1582  * skip non-present cpus
1583  */
1584 
1585 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
1586 	struct pkg_data *, struct thread_data *, struct core_data *,
1587 	struct pkg_data *), struct thread_data *thread_base,
1588 	struct core_data *core_base, struct pkg_data *pkg_base,
1589 	struct thread_data *thread_base2, struct core_data *core_base2,
1590 	struct pkg_data *pkg_base2)
1591 {
1592 	int retval, pkg_no, core_no, thread_no;
1593 
1594 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
1595 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
1596 			for (thread_no = 0; thread_no <
1597 				topo.num_threads_per_core; ++thread_no) {
1598 				struct thread_data *t, *t2;
1599 				struct core_data *c, *c2;
1600 				struct pkg_data *p, *p2;
1601 
1602 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
1603 
1604 				if (cpu_is_not_present(t->cpu_id))
1605 					continue;
1606 
1607 				t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
1608 
1609 				c = GET_CORE(core_base, core_no, pkg_no);
1610 				c2 = GET_CORE(core_base2, core_no, pkg_no);
1611 
1612 				p = GET_PKG(pkg_base, pkg_no);
1613 				p2 = GET_PKG(pkg_base2, pkg_no);
1614 
1615 				retval = func(t, c, p, t2, c2, p2);
1616 				if (retval)
1617 					return retval;
1618 			}
1619 		}
1620 	}
1621 	return 0;
1622 }
1623 
1624 /*
1625  * run func(cpu) on every cpu in /proc/stat
1626  * return max_cpu number
1627  */
1628 int for_all_proc_cpus(int (func)(int))
1629 {
1630 	FILE *fp;
1631 	int cpu_num;
1632 	int retval;
1633 
1634 	fp = fopen_or_die(proc_stat, "r");
1635 
1636 	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1637 	if (retval != 0)
1638 		err(1, "%s: failed to parse format", proc_stat);
1639 
1640 	while (1) {
1641 		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1642 		if (retval != 1)
1643 			break;
1644 
1645 		retval = func(cpu_num);
1646 		if (retval) {
1647 			fclose(fp);
1648 			return(retval);
1649 		}
1650 	}
1651 	fclose(fp);
1652 	return 0;
1653 }
1654 
1655 void re_initialize(void)
1656 {
1657 	free_all_buffers();
1658 	setup_all_buffers();
1659 	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
1660 }
1661 
1662 
1663 /*
1664  * count_cpus()
1665  * remember the last one seen, it will be the max
1666  */
1667 int count_cpus(int cpu)
1668 {
1669 	if (topo.max_cpu_num < cpu)
1670 		topo.max_cpu_num = cpu;
1671 
1672 	topo.num_cpus += 1;
1673 	return 0;
1674 }
1675 int mark_cpu_present(int cpu)
1676 {
1677 	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1678 	return 0;
1679 }
1680 
1681 void turbostat_loop()
1682 {
1683 	int retval;
1684 	int restarted = 0;
1685 
1686 restart:
1687 	restarted++;
1688 
1689 	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1690 	if (retval < -1) {
1691 		exit(retval);
1692 	} else if (retval == -1) {
1693 		if (restarted > 1) {
1694 			exit(retval);
1695 		}
1696 		re_initialize();
1697 		goto restart;
1698 	}
1699 	restarted = 0;
1700 	gettimeofday(&tv_even, (struct timezone *)NULL);
1701 
1702 	while (1) {
1703 		if (for_all_proc_cpus(cpu_is_not_present)) {
1704 			re_initialize();
1705 			goto restart;
1706 		}
1707 		sleep(interval_sec);
1708 		retval = for_all_cpus(get_counters, ODD_COUNTERS);
1709 		if (retval < -1) {
1710 			exit(retval);
1711 		} else if (retval == -1) {
1712 			re_initialize();
1713 			goto restart;
1714 		}
1715 		gettimeofday(&tv_odd, (struct timezone *)NULL);
1716 		timersub(&tv_odd, &tv_even, &tv_delta);
1717 		for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
1718 		compute_average(EVEN_COUNTERS);
1719 		format_all_counters(EVEN_COUNTERS);
1720 		flush_stdout();
1721 		sleep(interval_sec);
1722 		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1723 		if (retval < -1) {
1724 			exit(retval);
1725 		} else if (retval == -1) {
1726 			re_initialize();
1727 			goto restart;
1728 		}
1729 		gettimeofday(&tv_even, (struct timezone *)NULL);
1730 		timersub(&tv_even, &tv_odd, &tv_delta);
1731 		for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
1732 		compute_average(ODD_COUNTERS);
1733 		format_all_counters(ODD_COUNTERS);
1734 		flush_stdout();
1735 	}
1736 }
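/*
 * Sketch of the sampling scheme above (no new behavior): two complete
 * snapshot sets ping-pong each interval,
 *
 *	collect EVEN ... sleep ... collect ODD   ->  EVEN = ODD - EVEN, print EVEN
 *	                 sleep ... collect EVEN  ->  ODD  = EVEN - ODD, print ODD
 *
 * so every printed line covers exactly one interval and the delta is stored
 * back into the older snapshot, which is then averaged and formatted.
 */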
1737 
1738 void check_dev_msr()
1739 {
1740 	struct stat sb;
1741 	char pathname[32];
1742 
1743 	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1744 	if (stat(pathname, &sb))
1745  		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1746 			err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1747 }
1748 
1749 void check_permissions()
1750 {
1751 	struct __user_cap_header_struct cap_header_data;
1752 	cap_user_header_t cap_header = &cap_header_data;
1753 	struct __user_cap_data_struct cap_data_data;
1754 	cap_user_data_t cap_data = &cap_data_data;
1755 	extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1756 	int do_exit = 0;
1757 	char pathname[32];
1758 
1759 	/* check for CAP_SYS_RAWIO */
1760 	cap_header->pid = getpid();
1761 	cap_header->version = _LINUX_CAPABILITY_VERSION;
1762 	if (capget(cap_header, cap_data) < 0)
1763 		err(-6, "capget(2) failed");
1764 
1765 	if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1766 		do_exit++;
1767 		warnx("capget(CAP_SYS_RAWIO) failed,"
1768 			" try \"# setcap cap_sys_rawio=ep %s\"", progname);
1769 	}
1770 
1771 	/* test file permissions */
1772 	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1773 	if (euidaccess(pathname, R_OK)) {
1774 		do_exit++;
1775 		warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
1776 	}
1777 
1778 	/* if all else fails, tell them to be root */
1779 	if (do_exit)
1780 		if (getuid() != 0)
1781 			warnx("... or simply run as root");
1782 
1783 	if (do_exit)
1784 		exit(-6);
1785 }
1786 
1787 /*
1788  * NHM adds support for additional MSRs:
1789  *
1790  * MSR_SMI_COUNT                   0x00000034
1791  *
1792  * MSR_NHM_PLATFORM_INFO           0x000000ce
1793  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
1794  *
1795  * MSR_PKG_C3_RESIDENCY            0x000003f8
1796  * MSR_PKG_C6_RESIDENCY            0x000003f9
1797  * MSR_CORE_C3_RESIDENCY           0x000003fc
1798  * MSR_CORE_C6_RESIDENCY           0x000003fd
1799  *
1800  * Side effect:
1801  * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
1802  */
1803 int probe_nhm_msrs(unsigned int family, unsigned int model)
1804 {
1805 	unsigned long long msr;
1806 	int *pkg_cstate_limits;
1807 
1808 	if (!genuine_intel)
1809 		return 0;
1810 
1811 	if (family != 6)
1812 		return 0;
1813 
1814 	switch (model) {
1815 	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
1816 	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1817 	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
1818 	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
1819 	case 0x2C:	/* Westmere EP - Gulftown */
1820 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
1821 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
1822 		pkg_cstate_limits = nhm_pkg_cstate_limits;
1823 		break;
1824 	case 0x2A:	/* SNB */
1825 	case 0x2D:	/* SNB Xeon */
1826 	case 0x3A:	/* IVB */
1827 	case 0x3E:	/* IVB Xeon */
1828 		pkg_cstate_limits = snb_pkg_cstate_limits;
1829 		break;
1830 	case 0x3C:	/* HSW */
1831 	case 0x3F:	/* HSX */
1832 	case 0x45:	/* HSW */
1833 	case 0x46:	/* HSW */
1834 	case 0x3D:	/* BDW */
1835 	case 0x47:	/* BDW */
1836 	case 0x4F:	/* BDX */
1837 	case 0x56:	/* BDX-DE */
1838 	case 0x4E:	/* SKL */
1839 	case 0x5E:	/* SKL */
1840 		pkg_cstate_limits = hsw_pkg_cstate_limits;
1841 		break;
1842 	case 0x37:	/* BYT */
1843 	case 0x4D:	/* AVN */
1844 		pkg_cstate_limits = slv_pkg_cstate_limits;
1845 		break;
1846 	case 0x4C:	/* AMT */
1847 		pkg_cstate_limits = amt_pkg_cstate_limits;
1848 		break;
1849 	case 0x57:	/* PHI */
1850 		pkg_cstate_limits = phi_pkg_cstate_limits;
1851 		break;
1852 	default:
1853 		return 0;
1854 	}
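	/*
	 * The low 4 bits of MSR_NHM_SNB_PKG_CST_CFG_CTL encode the deepest
	 * package C-state allowed; the encoding differs per generation,
	 * which is why the tables selected above map it to pkg_cstate_limit.
	 */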
1855 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1856 
1857 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
1858 
1859 	return 1;
1860 }
1861 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
1862 {
1863 	switch (model) {
1864 	/* Nehalem compatible, but do not include turbo-ratio limit support */
1865 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
1866 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
1867 		return 0;
1868 	default:
1869 		return 1;
1870 	}
1871 }
1872 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
1873 {
1874 	if (!genuine_intel)
1875 		return 0;
1876 
1877 	if (family != 6)
1878 		return 0;
1879 
1880 	switch (model) {
1881 	case 0x3E:	/* IVB Xeon */
1882 	case 0x3F:	/* HSW Xeon */
1883 		return 1;
1884 	default:
1885 		return 0;
1886 	}
1887 }
1888 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
1889 {
1890 	if (!genuine_intel)
1891 		return 0;
1892 
1893 	if (family != 6)
1894 		return 0;
1895 
1896 	switch (model) {
1897 	case 0x3F:	/* HSW Xeon */
1898 		return 1;
1899 	default:
1900 		return 0;
1901 	}
1902 }
1903 
1904 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
1905 {
1906 	if (!genuine_intel)
1907 		return 0;
1908 
1909 	if (family != 6)
1910 		return 0;
1911 
1912 	switch (model) {
1913 	case 0x57:	/* Knights Landing */
1914 		return 1;
1915 	default:
1916 		return 0;
1917 	}
1918 }
1919 int has_config_tdp(unsigned int family, unsigned int model)
1920 {
1921 	if (!genuine_intel)
1922 		return 0;
1923 
1924 	if (family != 6)
1925 		return 0;
1926 
1927 	switch (model) {
1928 	case 0x3A:	/* IVB */
1929 	case 0x3E:	/* IVB Xeon */
1930 
1931 	case 0x3C:	/* HSW */
1932 	case 0x3F:	/* HSX */
1933 	case 0x45:	/* HSW */
1934 	case 0x46:	/* HSW */
1935 	case 0x3D:	/* BDW */
1936 	case 0x47:	/* BDW */
1937 	case 0x4F:	/* BDX */
1938 	case 0x56:	/* BDX-DE */
1939 	case 0x4E:	/* SKL */
1940 	case 0x5E:	/* SKL */
1941 
1942 	case 0x57:	/* Knights Landing */
1943 		return 1;
1944 	default:
1945 		return 0;
1946 	}
1947 }
1948 
1949 static void
1950 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
1951 {
1952 	if (!do_nhm_platform_info)
1953 		return;
1954 
1955 	dump_nhm_platform_info();
1956 
1957 	if (has_hsw_turbo_ratio_limit(family, model))
1958 		dump_hsw_turbo_ratio_limits();
1959 
1960 	if (has_ivt_turbo_ratio_limit(family, model))
1961 		dump_ivt_turbo_ratio_limits();
1962 
1963 	if (has_nhm_turbo_ratio_limit(family, model))
1964 		dump_nhm_turbo_ratio_limits();
1965 
1966 	if (has_knl_turbo_ratio_limit(family, model))
1967 		dump_knl_turbo_ratio_limits();
1968 
1969 	if (has_config_tdp(family, model))
1970 		dump_config_tdp();
1971 
1972 	dump_nhm_cst_cfg();
1973 }
1974 
1975 
1976 /*
1977  * print_epb()
1978  * Decode the ENERGY_PERF_BIAS MSR
1979  */
1980 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1981 {
1982 	unsigned long long msr;
1983 	char *epb_string;
1984 	int cpu;
1985 
1986 	if (!has_epb)
1987 		return 0;
1988 
1989 	cpu = t->cpu_id;
1990 
1991 	/* EPB is per-package */
1992 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1993 		return 0;
1994 
1995 	if (cpu_migrate(cpu)) {
1996 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
1997 		return -1;
1998 	}
1999 
2000 	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
2001 		return 0;
2002 
2003 	switch (msr & 0xF) {
2004 	case ENERGY_PERF_BIAS_PERFORMANCE:
2005 		epb_string = "performance";
2006 		break;
2007 	case ENERGY_PERF_BIAS_NORMAL:
2008 		epb_string = "balanced";
2009 		break;
2010 	case ENERGY_PERF_BIAS_POWERSAVE:
2011 		epb_string = "powersave";
2012 		break;
2013 	default:
2014 		epb_string = "custom";
2015 		break;
2016 	}
2017 	fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
2018 
2019 	return 0;
2020 }
2021 
2022 /*
2023  * print_perf_limit()
2024  */
2025 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2026 {
2027 	unsigned long long msr;
2028 	int cpu;
2029 
2030 	cpu = t->cpu_id;
2031 
2032 	/* per-package */
2033 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2034 		return 0;
2035 
2036 	if (cpu_migrate(cpu)) {
2037 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2038 		return -1;
2039 	}
2040 
2041 	if (do_core_perf_limit_reasons) {
2042 		get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
2043 		fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2044 		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
2045 			(msr & 1 << 15) ? "bit15, " : "",
2046 			(msr & 1 << 14) ? "bit14, " : "",
2047 			(msr & 1 << 13) ? "Transitions, " : "",
2048 			(msr & 1 << 12) ? "MultiCoreTurbo, " : "",
2049 			(msr & 1 << 11) ? "PkgPwrL2, " : "",
2050 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
2051 			(msr & 1 << 9) ? "CorePwr, " : "",
2052 			(msr & 1 << 8) ? "Amps, " : "",
2053 			(msr & 1 << 6) ? "VR-Therm, " : "",
2054 			(msr & 1 << 5) ? "Auto-HWP, " : "",
2055 			(msr & 1 << 4) ? "Graphics, " : "",
2056 			(msr & 1 << 2) ? "bit2, " : "",
2057 			(msr & 1 << 1) ? "ThermStatus, " : "",
2058 			(msr & 1 << 0) ? "PROCHOT, " : "");
2059 		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
2060 			(msr & 1 << 31) ? "bit31, " : "",
2061 			(msr & 1 << 30) ? "bit30, " : "",
2062 			(msr & 1 << 29) ? "Transitions, " : "",
2063 			(msr & 1 << 28) ? "MultiCoreTurbo, " : "",
2064 			(msr & 1 << 27) ? "PkgPwrL2, " : "",
2065 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
2066 			(msr & 1 << 25) ? "CorePwr, " : "",
2067 			(msr & 1 << 24) ? "Amps, " : "",
2068 			(msr & 1 << 22) ? "VR-Therm, " : "",
2069 			(msr & 1 << 21) ? "Auto-HWP, " : "",
2070 			(msr & 1 << 20) ? "Graphics, " : "",
2071 			(msr & 1 << 18) ? "bit18, " : "",
2072 			(msr & 1 << 17) ? "ThermStatus, " : "",
2073 			(msr & 1 << 16) ? "PROCHOT, " : "");
2074 
2075 	}
2076 	if (do_gfx_perf_limit_reasons) {
2077 		get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
2078 		fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2079 		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
2080 			(msr & 1 << 0) ? "PROCHOT, " : "",
2081 			(msr & 1 << 1) ? "ThermStatus, " : "",
2082 			(msr & 1 << 4) ? "Graphics, " : "",
2083 			(msr & 1 << 6) ? "VR-Therm, " : "",
2084 			(msr & 1 << 8) ? "Amps, " : "",
2085 			(msr & 1 << 9) ? "GFXPwr, " : "",
2086 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
2087 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
2088 		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
2089 			(msr & 1 << 16) ? "PROCHOT, " : "",
2090 			(msr & 1 << 17) ? "ThermStatus, " : "",
2091 			(msr & 1 << 20) ? "Graphics, " : "",
2092 			(msr & 1 << 22) ? "VR-Therm, " : "",
2093 			(msr & 1 << 24) ? "Amps, " : "",
2094 			(msr & 1 << 25) ? "GFXPwr, " : "",
2095 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
2096 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
2097 	}
2098 	if (do_ring_perf_limit_reasons) {
2099 		get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
2100 		fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2101 		fprintf(stderr, " (Active: %s%s%s%s%s%s)",
2102 			(msr & 1 << 0) ? "PROCHOT, " : "",
2103 			(msr & 1 << 1) ? "ThermStatus, " : "",
2104 			(msr & 1 << 6) ? "VR-Therm, " : "",
2105 			(msr & 1 << 8) ? "Amps, " : "",
2106 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
2107 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
2108 		fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
2109 			(msr & 1 << 16) ? "PROCHOT, " : "",
2110 			(msr & 1 << 17) ? "ThermStatus, " : "",
2111 			(msr & 1 << 22) ? "VR-Therm, " : "",
2112 			(msr & 1 << 24) ? "Amps, " : "",
2113 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
2114 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
2115 	}
2116 	return 0;
2117 }
2118 
2119 #define	RAPL_POWER_GRANULARITY	0x7FFF	/* 15 bit power granularity */
2120 #define	RAPL_TIME_GRANULARITY	0x3F /* 6 bit time granularity */
2121 
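/*
 * get_tdp()
 * On parts with RAPL_PKG_POWER_INFO, read the package thermal spec power
 * from bits 14:0 of MSR_PKG_POWER_INFO (in rapl_power_units Watts);
 * otherwise fall back to a rough per-model default.  The result is used
 * by rapl_probe() to estimate the Joule-counter range.
 */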
2122 double get_tdp(unsigned int model)
2123 {
2124 	unsigned long long msr;
2125 
2126 	if (do_rapl & RAPL_PKG_POWER_INFO)
2127 		if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
2128 			return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
2129 
2130 	switch (model) {
2131 	case 0x37:	/* BYT */
2132 	case 0x4D:	/* AVN */
2133 		return 30.0;
2134 	default:
2135 		return 135.0;
2136 	}
2137 }
2138 
2139 /*
2140  * rapl_dram_energy_units_probe()
2141  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
2142  */
2143 static double
2144 rapl_dram_energy_units_probe(int model, double rapl_energy_units)
2145 {
2146 	/* only called for genuine_intel, family 6 */
2147 
2148 	switch (model) {
2149 	case 0x3F:	/* HSX */
2150 	case 0x4F:	/* BDX */
2151 	case 0x56:	/* BDX-DE */
2152 	case 0x57:	/* KNL */
2153 		return (rapl_dram_energy_units = 15.3 / 1000000);
2154 	default:
2155 		return (rapl_energy_units);
2156 	}
2157 }
2158 
2159 
2160 /*
2161  * rapl_probe()
2162  *
2163  * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
2164  */
2165 void rapl_probe(unsigned int family, unsigned int model)
2166 {
2167 	unsigned long long msr;
2168 	unsigned int time_unit;
2169 	double tdp;
2170 
2171 	if (!genuine_intel)
2172 		return;
2173 
2174 	if (family != 6)
2175 		return;
2176 
2177 	switch (model) {
2178 	case 0x2A:	/* SNB */
2179 	case 0x3A:	/* IVB */
2180 	case 0x3C:	/* HSW */
2181 	case 0x45:	/* HSW */
2182 	case 0x46:	/* HSW */
2183 	case 0x3D:	/* BDW */
2184 	case 0x47:	/* BDW */
2185 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
2186 		break;
2187 	case 0x4E:	/* SKL */
2188 	case 0x5E:	/* SKL */
2189 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2190 		break;
2191 	case 0x3F:	/* HSX */
2192 	case 0x4F:	/* BDX */
2193 	case 0x56:	/* BDX-DE */
2194 	case 0x57:	/* KNL */
2195 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2196 		break;
2197 	case 0x2D:	/* SNB Xeon */
2198 	case 0x3E:	/* IVB Xeon */
2199 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
2200 		break;
2201 	case 0x37:	/* BYT */
2202 	case 0x4D:	/* AVN */
2203 		do_rapl = RAPL_PKG | RAPL_CORES;
2204 		break;
2205 	default:
2206 		return;
2207 	}
2208 
2209 	/* units on package 0, verify later other packages match */
2210 	if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
2211 		return;
2212 
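	/*
	 * MSR_RAPL_POWER_UNIT: bits 3:0 power units (1/2^N Watts),
	 * bits 12:8 energy units (1/2^N Joules, except BYT which encodes
	 * 2^N micro-Joules), bits 19:16 time units (1/2^N seconds).
	 * For example, a raw value of 0x000A0E03 decodes to 1/8 W,
	 * ~61 uJ and ~977 usec units.
	 */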
2213 	rapl_power_units = 1.0 / (1 << (msr & 0xF));
2214 	if (model == 0x37)
2215 		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
2216 	else
2217 		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
2218 
2219 	rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
2220 
2221 	time_unit = msr >> 16 & 0xF;
2222 	if (time_unit == 0)
2223 		time_unit = 0xA;
2224 
2225 	rapl_time_units = 1.0 / (1 << (time_unit));
2226 
2227 	tdp = get_tdp(model);
2228 
2229 	rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
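	/* seconds until the 32-bit energy counter wraps at a sustained TDP load */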
2230 	if (debug)
2231 		fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
2232 
2233 	return;
2234 }
2235 
2236 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
2237 {
2238 	if (!genuine_intel)
2239 		return;
2240 
2241 	if (family != 6)
2242 		return;
2243 
2244 	switch (model) {
2245 	case 0x3C:	/* HSW */
2246 	case 0x45:	/* HSW */
2247 	case 0x46:	/* HSW */
2248 		do_gfx_perf_limit_reasons = 1;	/* fall through: HSW client parts also have the core/ring MSRs */
2249 	case 0x3F:	/* HSX */
2250 		do_core_perf_limit_reasons = 1;
2251 		do_ring_perf_limit_reasons = 1;
2252 	default:
2253 		return;
2254 	}
2255 }
2256 
2257 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2258 {
2259 	unsigned long long msr;
2260 	unsigned int dts, dts2;
2261 	int cpu;
2262 
2263 	if (!(do_dts || do_ptm))
2264 		return 0;
2265 
2266 	cpu = t->cpu_id;
2267 
2268 	/* DTS is per-core, no need to print for each thread */
2269 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
2270 		return 0;
2271 
2272 	if (cpu_migrate(cpu)) {
2273 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2274 		return -1;
2275 	}
2276 
2277 	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
2278 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
2279 			return 0;
2280 
2281 		dts = (msr >> 16) & 0x7F;
2282 		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
2283 			cpu, msr, tcc_activation_temp - dts);
2284 
2285 #ifdef	THERM_DEBUG
2286 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
2287 			return 0;
2288 
2289 		dts = (msr >> 16) & 0x7F;
2290 		dts2 = (msr >> 8) & 0x7F;
2291 		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2292 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2293 #endif
2294 	}
2295 
2296 
2297 	if (do_dts) {
2298 		unsigned int resolution;
2299 
2300 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
2301 			return 0;
2302 
2303 		dts = (msr >> 16) & 0x7F;
2304 		resolution = (msr >> 27) & 0xF;
2305 		fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
2306 			cpu, msr, tcc_activation_temp - dts, resolution);
2307 
2308 #ifdef THERM_DEBUG
2309 		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
2310 			return 0;
2311 
2312 		dts = (msr >> 16) & 0x7F;
2313 		dts2 = (msr >> 8) & 0x7F;
2314 		fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2315 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2316 #endif
2317 	}
2318 
2319 	return 0;
2320 }
2321 
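/*
 * print_power_limit_msr()
 * Decode one RAPL power-limit field: bit 15 enable, bit 16 clamp,
 * bits 14:0 the limit in rapl_power_units, and a time window of
 * (1 + F/4) * 2^E * rapl_time_units, where F is bits 23:22 and
 * E is bits 21:17.
 */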
2322 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
2323 {
2324 	fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
2325 		cpu, label,
2326 		((msr >> 15) & 1) ? "EN" : "DIS",
2327 		((msr >> 0) & 0x7FFF) * rapl_power_units,
2328 		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
2329 		(((msr >> 16) & 1) ? "EN" : "DIS"));
2330 
2331 	return;
2332 }
2333 
2334 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2335 {
2336 	unsigned long long msr;
2337 	int cpu;
2338 
2339 	if (!do_rapl)
2340 		return 0;
2341 
2342 	/* RAPL counters are per package, so print only for 1st thread/package */
2343 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2344 		return 0;
2345 
2346 	cpu = t->cpu_id;
2347 	if (cpu_migrate(cpu)) {
2348 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2349 		return -1;
2350 	}
2351 
2352 	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
2353 		return -1;
2354 
2355 	if (debug) {
2356 		fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
2357 			"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
2358 			rapl_power_units, rapl_energy_units, rapl_time_units);
2359 	}
2360 	if (do_rapl & RAPL_PKG_POWER_INFO) {
2361 
2362 		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
2363 			return -5;
2364 
2365 
2366 		fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2367 			cpu, msr,
2368 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2369 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2370 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2371 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2372 
2373 	}
2374 	if (do_rapl & RAPL_PKG) {
2375 
2376 		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
2377 			return -9;
2378 
2379 		fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
2380 			cpu, msr, (msr >> 63) & 1 ? "": "UN");
2381 
2382 		print_power_limit_msr(cpu, msr, "PKG Limit #1");
2383 		fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
2384 			cpu,
2385 			((msr >> 47) & 1) ? "EN" : "DIS",
2386 			((msr >> 32) & 0x7FFF) * rapl_power_units,
2387 			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
2388 			((msr >> 48) & 1) ? "EN" : "DIS");
2389 	}
2390 
2391 	if (do_rapl & RAPL_DRAM_POWER_INFO) {
2392 		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
2393 			return -6;
2394 
2395 		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2396 			cpu, msr,
2397 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2398 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2399 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2400 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2401 	}
2402 	if (do_rapl & RAPL_DRAM) {
2403 		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
2404 			return -9;
2405 		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
2406 				cpu, msr, (msr >> 31) & 1 ? "": "UN");
2407 
2408 		print_power_limit_msr(cpu, msr, "DRAM Limit");
2409 	}
2410 	if (do_rapl & RAPL_CORE_POLICY) {
2411 		if (debug) {
2412 			if (get_msr(cpu, MSR_PP0_POLICY, &msr))
2413 				return -7;
2414 
2415 			fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
2416 		}
2417 	}
2418 	if (do_rapl & RAPL_CORES) {
2419 		if (debug) {
2420 
2421 			if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
2422 				return -9;
2423 			fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
2424 					cpu, msr, (msr >> 31) & 1 ? "": "UN");
2425 			print_power_limit_msr(cpu, msr, "Cores Limit");
2426 		}
2427 	}
2428 	if (do_rapl & RAPL_GFX) {
2429 		if (debug) {
2430 			if (get_msr(cpu, MSR_PP1_POLICY, &msr))
2431 				return -8;
2432 
2433 			fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
2434 
2435 			if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
2436 				return -9;
2437 			fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
2438 					cpu, msr, (msr >> 31) & 1 ? "": "UN");
2439 			print_power_limit_msr(cpu, msr, "GFX Limit");
2440 		}
2441 	}
2442 	return 0;
2443 }
2444 
2445 /*
2446  * SNB adds support for additional MSRs:
2447  *
2448  * MSR_PKG_C7_RESIDENCY            0x000003fa
2449  * MSR_CORE_C7_RESIDENCY           0x000003fe
2450  * MSR_PKG_C2_RESIDENCY            0x0000060d
2451  */
2452 
2453 int has_snb_msrs(unsigned int family, unsigned int model)
2454 {
2455 	if (!genuine_intel)
2456 		return 0;
2457 
2458 	switch (model) {
2459 	case 0x2A:	/* SNB */
2460 	case 0x2D:	/* SNB Xeon */
2461 	case 0x3A:	/* IVB */
2462 	case 0x3E:	/* IVB Xeon */
2463 	case 0x3C:	/* HSW */
2464 	case 0x3F:	/* HSX */
2465 	case 0x45:	/* HSW */
2466 	case 0x46:	/* HSW */
2467 	case 0x3D:	/* BDW */
2468 	case 0x47:	/* BDW */
2469 	case 0x4F:	/* BDX */
2470 	case 0x56:	/* BDX-DE */
2471 	case 0x4E:	/* SKL */
2472 	case 0x5E:	/* SKL */
2473 		return 1;
2474 	}
2475 	return 0;
2476 }
2477 
2478 /*
2479  * HSW adds support for additional MSRs:
2480  *
2481  * MSR_PKG_C8_RESIDENCY            0x00000630
2482  * MSR_PKG_C9_RESIDENCY            0x00000631
2483  * MSR_PKG_C10_RESIDENCY           0x00000632
2484  */
2485 int has_hsw_msrs(unsigned int family, unsigned int model)
2486 {
2487 	if (!genuine_intel)
2488 		return 0;
2489 
2490 	switch (model) {
2491 	case 0x45:	/* HSW */
2492 	case 0x3D:	/* BDW */
2493 	case 0x4E:	/* SKL */
2494 	case 0x5E:	/* SKL */
2495 		return 1;
2496 	}
2497 	return 0;
2498 }
2499 
2500 /*
2501  * SKL adds support for additional MSRS:
2502  *
2503  * MSR_PKG_WEIGHTED_CORE_C0_RES    0x00000658
2504  * MSR_PKG_ANY_CORE_C0_RES         0x00000659
2505  * MSR_PKG_ANY_GFXE_C0_RES         0x0000065A
2506  * MSR_PKG_BOTH_CORE_GFXE_C0_RES   0x0000065B
2507  */
2508 int has_skl_msrs(unsigned int family, unsigned int model)
2509 {
2510 	if (!genuine_intel)
2511 		return 0;
2512 
2513 	switch (model) {
2514 	case 0x4E:	/* SKL */
2515 	case 0x5E:	/* SKL */
2516 		return 1;
2517 	}
2518 	return 0;
2519 }
2520 
2521 
2522 
2523 int is_slm(unsigned int family, unsigned int model)
2524 {
2525 	if (!genuine_intel)
2526 		return 0;
2527 	switch (model) {
2528 	case 0x37:	/* BYT */
2529 	case 0x4D:	/* AVN */
2530 		return 1;
2531 	}
2532 	return 0;
2533 }
2534 
2535 int is_knl(unsigned int family, unsigned int model)
2536 {
2537 	if (!genuine_intel)
2538 		return 0;
2539 	switch (model) {
2540 	case 0x57:	/* KNL */
2541 		return 1;
2542 	}
2543 	return 0;
2544 }
2545 
2546 #define SLM_BCLK_FREQS 5
2547 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
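/* the low nibble of MSR_FSB_FREQ indexes slm_freq_table[], see slm_bclk() */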
2548 
2549 double slm_bclk(void)
2550 {
2551 	unsigned long long msr = 3;
2552 	unsigned int i;
2553 	double freq;
2554 
2555 	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
2556 		fprintf(stderr, "SLM BCLK: unknown\n");
2557 
2558 	i = msr & 0xf;
2559 	if (i >= SLM_BCLK_FREQS) {
2560 		fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
2561 		i = 3;
2562 	}
2563 	freq = slm_freq_table[i];
2564 
2565 	fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);
2566 
2567 	return freq;
2568 }
2569 
2570 double discover_bclk(unsigned int family, unsigned int model)
2571 {
2572 	if (has_snb_msrs(family, model))
2573 		return 100.00;
2574 	else if (is_slm(family, model))
2575 		return slm_bclk();
2576 	else
2577 		return 133.33;
2578 }
2579 
2580 /*
2581  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
2582  * the Thermal Control Circuit (TCC) activates.
2583  * This is usually equal to tjMax.
2584  *
2585  * Older processors do not have this MSR, so there we guess,
2586  * but also allow cmdline over-ride with -T.
2587  *
2588  * Several MSR temperature values are in units of degrees-C
2589  * below this value, including the Digital Thermal Sensor (DTS),
2590  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
2591  */
2592 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2593 {
2594 	unsigned long long msr;
2595 	unsigned int target_c_local;
2596 	int cpu;
2597 
2598 	/* tcc_activation_temp is used only for dts or ptm */
2599 	if (!(do_dts || do_ptm))
2600 		return 0;
2601 
2602 	/* this is a per-package concept */
2603 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2604 		return 0;
2605 
2606 	cpu = t->cpu_id;
2607 	if (cpu_migrate(cpu)) {
2608 		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2609 		return -1;
2610 	}
2611 
2612 	if (tcc_activation_temp_override != 0) {
2613 		tcc_activation_temp = tcc_activation_temp_override;
2614 		fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
2615 			cpu, tcc_activation_temp);
2616 		return 0;
2617 	}
2618 
2619 	/* Temperature Target MSR is Nehalem and newer only */
2620 	if (!do_nhm_platform_info)
2621 		goto guess;
2622 
2623 	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
2624 		goto guess;
2625 
2626 	target_c_local = (msr >> 16) & 0xFF;
2627 
2628 	if (debug)
2629 		fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
2630 			cpu, msr, target_c_local);
2631 
2632 	if (!target_c_local)
2633 		goto guess;
2634 
2635 	tcc_activation_temp = target_c_local;
2636 
2637 	return 0;
2638 
2639 guess:
2640 	tcc_activation_temp = TJMAX_DEFAULT;
2641 	fprintf(stderr, "cpu%d: Guessing tjMax %d C, please use -T to specify\n",
2642 		cpu, tcc_activation_temp);
2643 
2644 	return 0;
2645 }
2646 void process_cpuid()
2647 {
2648 	unsigned int eax, ebx, ecx, edx, max_level;
2649 	unsigned int fms, family, model, stepping;
2650 
2651 	eax = ebx = ecx = edx = 0;
2652 
2653 	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
2654 
2655 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
2656 		genuine_intel = 1;
2657 
2658 	if (debug)
2659 		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
2660 			(char *)&ebx, (char *)&edx, (char *)&ecx);
2661 
2662 	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
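	/*
	 * CPUID.1 EAX holds family/model/stepping; for family 6 and 0xF
	 * the extended-model field (bits 19:16) supplies the high nibble
	 * of the model number, e.g. 0x5E for SKL.
	 */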
2663 	family = (fms >> 8) & 0xf;
2664 	model = (fms >> 4) & 0xf;
2665 	stepping = fms & 0xf;
2666 	if (family == 6 || family == 0xf)
2667 		model += ((fms >> 16) & 0xf) << 4;
2668 
2669 	if (debug)
2670 		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
2671 			max_level, family, model, stepping, family, model, stepping);
2672 
2673 	if (!(edx & (1 << 5)))
2674 		errx(1, "CPUID: no MSR");
2675 
2676 	/*
2677 	 * check max extended function levels of CPUID.
2678 	 * This is needed to check for invariant TSC.
2679 	 * This check is valid for both Intel and AMD.
2680 	 */
2681 	ebx = ecx = edx = 0;
2682 	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
2683 
2684 	if (max_level >= 0x80000007) {
2685 
2686 		/*
2687 		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
2688 		 * this check is valid for both Intel and AMD
2689 		 */
2690 		__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
2691 		has_invariant_tsc = edx & (1 << 8);
2692 	}
2693 
2694 	/*
2695 	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
2696 	 * this check is valid for both Intel and AMD
2697 	 */
2698 
2699 	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
2700 	has_aperf = ecx & (1 << 0);
2701 	do_dts = eax & (1 << 0);
2702 	do_ptm = eax & (1 << 6);
2703 	has_epb = ecx & (1 << 3);
2704 
2705 	if (debug)
2706 		fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
2707 			has_aperf ? "" : "No ",
2708 			do_dts ? "" : "No ",
2709 			do_ptm ? "" : "No ",
2710 			has_epb ? "" : "No ");
2711 
2712 	if (max_level >= 0x15) {
2713 		unsigned int eax_crystal;
2714 		unsigned int ebx_tsc;
2715 
2716 		/*
2717 		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
2718 		 */
2719 		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
2720 		__get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);
2721 
2722 		if (ebx_tsc != 0) {
2723 
2724 			if (debug && (ebx != 0))
2725 				fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
2726 					eax_crystal, ebx_tsc, crystal_hz);
2727 
2728 			if (crystal_hz == 0)
2729 				switch(model) {
2730 				case 0x4E:	/* SKL */
2731 				case 0x5E:	/* SKL */
2732 					crystal_hz = 24000000;	/* 24 MHz */
2733 					break;
2734 				default:
2735 					crystal_hz = 0;
2736 			}
2737 
2738 			if (crystal_hz) {
2739 				tsc_hz =  (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
2740 				if (debug)
2741 					fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
2742 						tsc_hz / 1000000, crystal_hz, ebx_tsc,  eax_crystal);
2743 			}
2744 		}
2745 	}
2746 
2747 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
2748 	do_snb_cstates = has_snb_msrs(family, model);
2749 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
2750 	do_pc3 = (pkg_cstate_limit >= PCL__3);
2751 	do_pc6 = (pkg_cstate_limit >= PCL__6);
2752 	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
2753 	do_c8_c9_c10 = has_hsw_msrs(family, model);
2754 	do_skl_residency = has_skl_msrs(family, model);
2755 	do_slm_cstates = is_slm(family, model);
2756 	do_knl_cstates  = is_knl(family, model);
2757 	bclk = discover_bclk(family, model);
2758 
2759 	rapl_probe(family, model);
2760 	perf_limit_reasons_probe(family, model);
2761 
2762 	if (debug)
2763 		dump_cstate_pstate_config_info(family, model);
2764 
2765 	return;
2766 }
2767 
2768 void help()
2769 {
2770 	fprintf(stderr,
2771 	"Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
2772 	"\n"
2773 	"Turbostat forks the specified COMMAND and prints statistics\n"
2774 	"when COMMAND completes.\n"
2775 	"If no COMMAND is specified, turbostat wakes every 5 seconds\n"
2776 	"to print statistics, until interrupted.\n"
2777 	"--debug	run in \"debug\" mode\n"
2778 	"--interval sec	Override default 5-second measurement interval\n"
2779 	"--help		print this help message\n"
2780 	"--counter msr	print 32-bit counter at address \"msr\"\n"
2781 	"--Counter msr	print 64-bit Counter at address \"msr\"\n"
2782 	"--msr msr	print 32-bit value at address \"msr\"\n"
2783 	"--MSR msr	print 64-bit Value at address \"msr\"\n"
2784 	"--version	print version information\n"
2785 	"\n"
2786 	"For more help, run \"man turbostat\"\n");
2787 }
2788 
2789 
2790 /*
2791  * in /dev/cpu/ return success for names that are numbers
2792  * ie. filter out ".", "..", "microcode".
2793  */
2794 int dir_filter(const struct dirent *dirp)
2795 {
2796 	if (isdigit(dirp->d_name[0]))
2797 		return 1;
2798 	else
2799 		return 0;
2800 }
2801 
2802 int open_dev_cpu_msr(int dummy1)
2803 {
2804 	return 0;
2805 }
2806 
2807 void topology_probe()
2808 {
2809 	int i;
2810 	int max_core_id = 0;
2811 	int max_package_id = 0;
2812 	int max_siblings = 0;
2813 	struct cpu_topology {
2814 		int core_id;
2815 		int physical_package_id;
2816 	} *cpus;
2817 
2818 	/* Initialize num_cpus, max_cpu_num */
2819 	topo.num_cpus = 0;
2820 	topo.max_cpu_num = 0;
2821 	for_all_proc_cpus(count_cpus);
2822 	if (!summary_only && topo.num_cpus > 1)
2823 		show_cpu = 1;
2824 
2825 	if (debug > 1)
2826 		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
2827 
2828 	cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
2829 	if (cpus == NULL)
2830 		err(1, "calloc cpus");
2831 
2832 	/*
2833 	 * Allocate and initialize cpu_present_set
2834 	 */
2835 	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
2836 	if (cpu_present_set == NULL)
2837 		err(3, "CPU_ALLOC");
2838 	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2839 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
2840 	for_all_proc_cpus(mark_cpu_present);
2841 
2842 	/*
2843 	 * Allocate and initialize cpu_affinity_set
2844 	 */
2845 	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
2846 	if (cpu_affinity_set == NULL)
2847 		err(3, "CPU_ALLOC");
2848 	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2849 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
2850 
2851 
2852 	/*
2853 	 * For online cpus
2854 	 * find max_core_id, max_package_id
2855 	 */
2856 	for (i = 0; i <= topo.max_cpu_num; ++i) {
2857 		int siblings;
2858 
2859 		if (cpu_is_not_present(i)) {
2860 			if (debug > 1)
2861 				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
2862 			continue;
2863 		}
2864 		cpus[i].core_id = get_core_id(i);
2865 		if (cpus[i].core_id > max_core_id)
2866 			max_core_id = cpus[i].core_id;
2867 
2868 		cpus[i].physical_package_id = get_physical_package_id(i);
2869 		if (cpus[i].physical_package_id > max_package_id)
2870 			max_package_id = cpus[i].physical_package_id;
2871 
2872 		siblings = get_num_ht_siblings(i);
2873 		if (siblings > max_siblings)
2874 			max_siblings = siblings;
2875 		if (debug > 1)
2876 			fprintf(stderr, "cpu %d pkg %d core %d\n",
2877 				i, cpus[i].physical_package_id, cpus[i].core_id);
2878 	}
2879 	topo.num_cores_per_pkg = max_core_id + 1;
2880 	if (debug > 1)
2881 		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
2882 			max_core_id, topo.num_cores_per_pkg);
2883 	if (debug && !summary_only && topo.num_cores_per_pkg > 1)
2884 		show_core = 1;
2885 
2886 	topo.num_packages = max_package_id + 1;
2887 	if (debug > 1)
2888 		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
2889 			max_package_id, topo.num_packages);
2890 	if (debug && !summary_only && topo.num_packages > 1)
2891 		show_pkg = 1;
2892 
2893 	topo.num_threads_per_core = max_siblings;
2894 	if (debug > 1)
2895 		fprintf(stderr, "max_siblings %d\n", max_siblings);
2896 
2897 	free(cpus);
2898 }
2899 
2900 void
2901 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
2902 {
2903 	int i;
2904 
2905 	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
2906 		topo.num_packages, sizeof(struct thread_data));
2907 	if (*t == NULL)
2908 		goto error;
2909 
2910 	for (i = 0; i < topo.num_threads_per_core *
2911 		topo.num_cores_per_pkg * topo.num_packages; i++)
2912 		(*t)[i].cpu_id = -1;
2913 
2914 	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
2915 		sizeof(struct core_data));
2916 	if (*c == NULL)
2917 		goto error;
2918 
2919 	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
2920 		(*c)[i].core_id = -1;
2921 
2922 	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
2923 	if (*p == NULL)
2924 		goto error;
2925 
2926 	for (i = 0; i < topo.num_packages; i++)
2927 		(*p)[i].package_id = i;
2928 
2929 	return;
2930 error:
2931 	err(1, "calloc counters");
2932 }
2933 /*
2934  * init_counter()
2935  *
2936  * set cpu_id, core_num, pkg_num
2937  * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
2938  *
2939  * increment topo.num_cores when 1st core in pkg seen
2940  */
2941 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
2942 	struct pkg_data *pkg_base, int thread_num, int core_num,
2943 	int pkg_num, int cpu_id)
2944 {
2945 	struct thread_data *t;
2946 	struct core_data *c;
2947 	struct pkg_data *p;
2948 
2949 	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
2950 	c = GET_CORE(core_base, core_num, pkg_num);
2951 	p = GET_PKG(pkg_base, pkg_num);
2952 
2953 	t->cpu_id = cpu_id;
2954 	if (thread_num == 0) {
2955 		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
2956 		if (cpu_is_first_core_in_package(cpu_id))
2957 			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
2958 	}
2959 
2960 	c->core_id = core_num;
2961 	p->package_id = pkg_num;
2962 }
2963 
2964 
2965 int initialize_counters(int cpu_id)
2966 {
2967 	int my_thread_id, my_core_id, my_package_id;
2968 
2969 	my_package_id = get_physical_package_id(cpu_id);
2970 	my_core_id = get_core_id(cpu_id);
2971 	my_thread_id = get_cpu_position_in_core(cpu_id);
2972 	if (!my_thread_id)
2973 		topo.num_cores++;
2974 
2975 	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2976 	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2977 	return 0;
2978 }
2979 
2980 void allocate_output_buffer()
2981 {
2982 	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
2983 	outp = output_buffer;
2984 	if (outp == NULL)
2985 		err(-1, "calloc output buffer");
2986 }
2987 
2988 void setup_all_buffers(void)
2989 {
2990 	topology_probe();
2991 	allocate_counters(&thread_even, &core_even, &package_even);
2992 	allocate_counters(&thread_odd, &core_odd, &package_odd);
2993 	allocate_output_buffer();
2994 	for_all_proc_cpus(initialize_counters);
2995 }
2996 
2997 void set_base_cpu(void)
2998 {
2999 	base_cpu = sched_getcpu();
3000 	if (base_cpu < 0)
3001 		err(-ENODEV, "No valid cpus found");
3002 
3003 	if (debug > 1)
3004 		fprintf(stderr, "base_cpu = %d\n", base_cpu);
3005 }
3006 
3007 void turbostat_init()
3008 {
3009 	setup_all_buffers();
3010 	set_base_cpu();
3011 	check_dev_msr();
3012 	check_permissions();
3013 	process_cpuid();
3014 
3015 
3016 	if (debug)
3017 		for_all_cpus(print_epb, ODD_COUNTERS);
3018 
3019 	if (debug)
3020 		for_all_cpus(print_perf_limit, ODD_COUNTERS);
3021 
3022 	if (debug)
3023 		for_all_cpus(print_rapl, ODD_COUNTERS);
3024 
3025 	for_all_cpus(set_temperature_target, ODD_COUNTERS);
3026 
3027 	if (debug)
3028 		for_all_cpus(print_thermal, ODD_COUNTERS);
3029 }
3030 
3031 int fork_it(char **argv)
3032 {
3033 	pid_t child_pid;
3034 	int status;
3035 
3036 	status = for_all_cpus(get_counters, EVEN_COUNTERS);
3037 	if (status)
3038 		exit(status);
3039 	/* clear affinity side-effect of get_counters() */
3040 	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
3041 	gettimeofday(&tv_even, (struct timezone *)NULL);
3042 
3043 	child_pid = fork();
3044 	if (!child_pid) {
3045 		/* child */
3046 		execvp(argv[0], argv);
		err(errno, "exec %s", argv[0]);	/* only reached if exec fails */
3047 	} else {
3048 
3049 		/* parent */
3050 		if (child_pid == -1)
3051 			err(1, "fork");
3052 
3053 		signal(SIGINT, SIG_IGN);
3054 		signal(SIGQUIT, SIG_IGN);
3055 		if (waitpid(child_pid, &status, 0) == -1)
3056 			err(status, "waitpid");
3057 	}
3058 	/*
3059 	 * n.b. fork_it() does not check for errors from for_all_cpus()
3060 	 * because re-starting is problematic when forking
3061 	 */
3062 	for_all_cpus(get_counters, ODD_COUNTERS);
3063 	gettimeofday(&tv_odd, (struct timezone *)NULL);
3064 	timersub(&tv_odd, &tv_even, &tv_delta);
3065 	for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
3066 	compute_average(EVEN_COUNTERS);
3067 	format_all_counters(EVEN_COUNTERS);
3068 	flush_stderr();
3069 
3070 	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
3071 
3072 	return status;
3073 }
3074 
3075 int get_and_dump_counters(void)
3076 {
3077 	int status;
3078 
3079 	status = for_all_cpus(get_counters, ODD_COUNTERS);
3080 	if (status)
3081 		return status;
3082 
3083 	status = for_all_cpus(dump_counters, ODD_COUNTERS);
3084 	if (status)
3085 		return status;
3086 
3087 	flush_stdout();
3088 
3089 	return status;
3090 }
3091 
3092 void print_version() {
3093 	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
3094 		" - Len Brown <lenb@kernel.org>\n");
3095 }
3096 
3097 void cmdline(int argc, char **argv)
3098 {
3099 	int opt;
3100 	int option_index = 0;
3101 	static struct option long_options[] = {
3102 		{"Counter",	required_argument,	0, 'C'},
3103 		{"counter",	required_argument,	0, 'c'},
3104 		{"Dump",	no_argument,		0, 'D'},
3105 		{"debug",	no_argument,		0, 'd'},
3106 		{"interval",	required_argument,	0, 'i'},
3107 		{"help",	no_argument,		0, 'h'},
3108 		{"Joules",	no_argument,		0, 'J'},
3109 		{"MSR",		required_argument,	0, 'M'},
3110 		{"msr",		required_argument,	0, 'm'},
3111 		{"Package",	no_argument,		0, 'P'},
3112 		{"processor",	no_argument,		0, 'p'},
3113 		{"Summary",	no_argument,		0, 'S'},
3114 		{"TCC",		required_argument,	0, 'T'},
3115 		{"version",	no_argument,		0, 'v' },
3116 		{0,		0,			0,  0 }
3117 	};
3118 
3119 	progname = argv[0];
3120 
3121 	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:PpST:v",
3122 				long_options, &option_index)) != -1) {
3123 		switch (opt) {
3124 		case 'C':
3125 			sscanf(optarg, "%x", &extra_delta_offset64);
3126 			break;
3127 		case 'c':
3128 			sscanf(optarg, "%x", &extra_delta_offset32);
3129 			break;
3130 		case 'D':
3131 			dump_only++;
3132 			break;
3133 		case 'd':
3134 			debug++;
3135 			break;
3136 		case 'h':
3137 		default:
3138 			help();
3139 			exit(1);
3140 		case 'i':
3141 			interval_sec = atoi(optarg);
3142 			break;
3143 		case 'J':
3144 			rapl_joules++;
3145 			break;
3146 		case 'M':
3147 			sscanf(optarg, "%x", &extra_msr_offset64);
3148 			break;
3149 		case 'm':
3150 			sscanf(optarg, "%x", &extra_msr_offset32);
3151 			break;
3152 		case 'P':
3153 			show_pkg_only++;
3154 			break;
3155 		case 'p':
3156 			show_core_only++;
3157 			break;
3158 		case 'S':
3159 			summary_only++;
3160 			break;
3161 		case 'T':
3162 			tcc_activation_temp_override = atoi(optarg);
3163 			break;
3164 		case 'v':
3165 			print_version();
3166 			exit(0);
3167 			break;
3168 		}
3169 	}
3170 }
3171 
3172 int main(int argc, char **argv)
3173 {
3174 	cmdline(argc, argv);
3175 
3176 	if (debug)
3177 		print_version();
3178 
3179 	turbostat_init();
3180 
3181 	/* dump counters and exit */
3182 	if (dump_only)
3183 		return get_and_dump_counters();
3184 
3185 	/*
3186 	 * if any params left, it must be a command to fork
3187 	 */
3188 	if (argc - optind)
3189 		return fork_it(argv + optind);
3190 	else
3191 		turbostat_loop();
3192 
3193 	return 0;
3194 }
3195