1 /*
2  * turbostat -- show CPU frequency and C-state residency
3  * on modern Intel turbo-capable processors.
4  *
5  * Copyright (c) 2013 Intel Corporation.
6  * Len Brown <len.brown@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 #define _GNU_SOURCE
23 #include MSRHEADER
24 #include INTEL_FAMILY_HEADER
25 #include <stdarg.h>
26 #include <stdio.h>
27 #include <err.h>
28 #include <unistd.h>
29 #include <sys/types.h>
30 #include <sys/wait.h>
31 #include <sys/stat.h>
32 #include <sys/resource.h>
33 #include <fcntl.h>
34 #include <signal.h>
35 #include <sys/time.h>
36 #include <stdlib.h>
37 #include <getopt.h>
38 #include <dirent.h>
39 #include <string.h>
40 #include <ctype.h>
41 #include <sched.h>
42 #include <time.h>
43 #include <cpuid.h>
44 #include <linux/capability.h>
45 #include <errno.h>
46 
47 char *proc_stat = "/proc/stat";
48 FILE *outf;
49 int *fd_percpu;
50 struct timespec interval_ts = {5, 0};
51 unsigned int debug;
52 unsigned int quiet;
53 unsigned int rapl_joules;
54 unsigned int summary_only;
55 unsigned int dump_only;
56 unsigned int do_snb_cstates;
57 unsigned int do_knl_cstates;
58 unsigned int do_skl_residency;
59 unsigned int do_slm_cstates;
60 unsigned int use_c1_residency_msr;
61 unsigned int has_aperf;
62 unsigned int has_epb;
63 unsigned int do_irtl_snb;
64 unsigned int do_irtl_hsw;
65 unsigned int units = 1000000;	/* MHz etc */
66 unsigned int genuine_intel;
67 unsigned int has_invariant_tsc;
68 unsigned int do_nhm_platform_info;
69 unsigned int no_MSR_MISC_PWR_MGMT;
70 unsigned int aperf_mperf_multiplier = 1;
71 double bclk;
72 double base_hz;
73 unsigned int has_base_hz;
74 double tsc_tweak = 1.0;
75 unsigned int show_pkg_only;
76 unsigned int show_core_only;
77 char *output_buffer, *outp;
78 unsigned int do_rapl;
79 unsigned int do_dts;
80 unsigned int do_ptm;
81 unsigned long long  gfx_cur_rc6_ms;
82 unsigned int gfx_cur_mhz;
83 unsigned int tcc_activation_temp;
84 unsigned int tcc_activation_temp_override;
85 double rapl_power_units, rapl_time_units;
86 double rapl_dram_energy_units, rapl_energy_units;
87 double rapl_joule_counter_range;
88 unsigned int do_core_perf_limit_reasons;
89 unsigned int do_gfx_perf_limit_reasons;
90 unsigned int do_ring_perf_limit_reasons;
91 unsigned int crystal_hz;
92 unsigned long long tsc_hz;
93 int base_cpu;
94 double discover_bclk(unsigned int family, unsigned int model);
95 unsigned int has_hwp;	/* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
96 			/* IA32_HWP_REQUEST, IA32_HWP_STATUS */
97 unsigned int has_hwp_notify;		/* IA32_HWP_INTERRUPT */
98 unsigned int has_hwp_activity_window;	/* IA32_HWP_REQUEST[bits 41:32] */
99 unsigned int has_hwp_epp;		/* IA32_HWP_REQUEST[bits 31:24] */
100 unsigned int has_hwp_pkg;		/* IA32_HWP_REQUEST_PKG */
101 unsigned int has_misc_feature_control;
102 
103 #define RAPL_PKG		(1 << 0)
104 					/* 0x610 MSR_PKG_POWER_LIMIT */
105 					/* 0x611 MSR_PKG_ENERGY_STATUS */
106 #define RAPL_PKG_PERF_STATUS	(1 << 1)
107 					/* 0x613 MSR_PKG_PERF_STATUS */
108 #define RAPL_PKG_POWER_INFO	(1 << 2)
109 					/* 0x614 MSR_PKG_POWER_INFO */
110 
111 #define RAPL_DRAM		(1 << 3)
112 					/* 0x618 MSR_DRAM_POWER_LIMIT */
113 					/* 0x619 MSR_DRAM_ENERGY_STATUS */
114 #define RAPL_DRAM_PERF_STATUS	(1 << 4)
115 					/* 0x61b MSR_DRAM_PERF_STATUS */
116 #define RAPL_DRAM_POWER_INFO	(1 << 5)
117 					/* 0x61c MSR_DRAM_POWER_INFO */
118 
119 #define RAPL_CORES_POWER_LIMIT	(1 << 6)
120 					/* 0x638 MSR_PP0_POWER_LIMIT */
121 #define RAPL_CORE_POLICY	(1 << 7)
122 					/* 0x63a MSR_PP0_POLICY */
123 
124 #define RAPL_GFX		(1 << 8)
125 					/* 0x640 MSR_PP1_POWER_LIMIT */
126 					/* 0x641 MSR_PP1_ENERGY_STATUS */
127 					/* 0x642 MSR_PP1_POLICY */
128 
129 #define RAPL_CORES_ENERGY_STATUS	(1 << 9)
130 					/* 0x639 MSR_PP0_ENERGY_STATUS */
131 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
132 #define	TJMAX_DEFAULT	100
133 
134 #define MAX(a, b) ((a) > (b) ? (a) : (b))
135 
/*
 * buffer size used by sscanf() for added column names
 * Usually truncated to 7 characters, but also handles 18-character columns for raw 64-bit counters
 */
140 #define	NAME_BYTES 20
141 #define PATH_BYTES 128
142 
143 int backwards_count;
144 char *progname;
145 
146 #define CPU_SUBSET_MAXCPUS	1024	/* need to use before probe... */
147 cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset;
148 size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
149 #define MAX_ADDED_COUNTERS 16
150 
151 struct thread_data {
152 	unsigned long long tsc;
153 	unsigned long long aperf;
154 	unsigned long long mperf;
155 	unsigned long long c1;
156 	unsigned int irq_count;
157 	unsigned int smi_count;
158 	unsigned int cpu_id;
159 	unsigned int flags;
160 #define CPU_IS_FIRST_THREAD_IN_CORE	0x2
161 #define CPU_IS_FIRST_CORE_IN_PACKAGE	0x4
162 	unsigned long long counter[MAX_ADDED_COUNTERS];
163 } *thread_even, *thread_odd;
164 
165 struct core_data {
166 	unsigned long long c3;
167 	unsigned long long c6;
168 	unsigned long long c7;
	unsigned long long mc6_us;	/* duplicated per-core for now, even though it is per-module */
170 	unsigned int core_temp_c;
171 	unsigned int core_id;
172 	unsigned long long counter[MAX_ADDED_COUNTERS];
173 } *core_even, *core_odd;
174 
175 struct pkg_data {
176 	unsigned long long pc2;
177 	unsigned long long pc3;
178 	unsigned long long pc6;
179 	unsigned long long pc7;
180 	unsigned long long pc8;
181 	unsigned long long pc9;
182 	unsigned long long pc10;
183 	unsigned long long pkg_wtd_core_c0;
184 	unsigned long long pkg_any_core_c0;
185 	unsigned long long pkg_any_gfxe_c0;
186 	unsigned long long pkg_both_core_gfxe_c0;
187 	long long gfx_rc6_ms;
188 	unsigned int gfx_mhz;
189 	unsigned int package_id;
190 	unsigned int energy_pkg;	/* MSR_PKG_ENERGY_STATUS */
191 	unsigned int energy_dram;	/* MSR_DRAM_ENERGY_STATUS */
192 	unsigned int energy_cores;	/* MSR_PP0_ENERGY_STATUS */
193 	unsigned int energy_gfx;	/* MSR_PP1_ENERGY_STATUS */
194 	unsigned int rapl_pkg_perf_status;	/* MSR_PKG_PERF_STATUS */
195 	unsigned int rapl_dram_perf_status;	/* MSR_DRAM_PERF_STATUS */
196 	unsigned int pkg_temp_c;
197 	unsigned long long counter[MAX_ADDED_COUNTERS];
198 } *package_even, *package_odd;
199 
200 #define ODD_COUNTERS thread_odd, core_odd, package_odd
201 #define EVEN_COUNTERS thread_even, core_even, package_even
202 
203 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
204 	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
205 		topo.num_threads_per_core + \
206 		(core_no) * topo.num_threads_per_core + (thread_no))
207 #define GET_CORE(core_base, core_no, pkg_no) \
208 	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
209 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
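/*
 * Worked example of the indexing above (hypothetical topology, for
 * illustration only): with topo.num_cores_per_pkg = 4 and
 * topo.num_threads_per_core = 2,
 *
 *	GET_THREAD(base, 1, 2, 1) == base + 1*4*2 + 2*2 + 1 == base + 13
 *
 * i.e. the 2nd thread of the 3rd core in the 2nd package.
 */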
210 
211 enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE};
212 enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC};
213 enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT};
214 
215 struct msr_counter {
216 	unsigned int msr_num;
217 	char name[NAME_BYTES];
218 	char path[PATH_BYTES];
219 	unsigned int width;
220 	enum counter_type type;
221 	enum counter_format format;
222 	struct msr_counter *next;
223 	unsigned int flags;
224 #define	FLAGS_HIDE	(1 << 0)
225 #define	FLAGS_SHOW	(1 << 1)
226 #define	SYSFS_PERCPU	(1 << 1)
227 };
228 
229 struct sys_counters {
230 	unsigned int added_thread_counters;
231 	unsigned int added_core_counters;
232 	unsigned int added_package_counters;
233 	struct msr_counter *tp;
234 	struct msr_counter *cp;
235 	struct msr_counter *pp;
236 } sys;
237 
238 struct system_summary {
239 	struct thread_data threads;
240 	struct core_data cores;
241 	struct pkg_data packages;
242 } average;
243 
244 
245 struct topo_params {
246 	int num_packages;
247 	int num_cpus;
248 	int num_cores;
249 	int max_cpu_num;
250 	int num_cores_per_pkg;
251 	int num_threads_per_core;
252 } topo;
253 
254 struct timeval tv_even, tv_odd, tv_delta;
255 
256 int *irq_column_2_cpu;	/* /proc/interrupts column numbers */
257 int *irqs_per_cpu;		/* indexed by cpu_num */
258 
259 void setup_all_buffers(void);
260 
261 int cpu_is_not_present(int cpu)
262 {
263 	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
264 }
265 /*
266  * run func(thread, core, package) in topology order
267  * skip non-present cpus
268  */
269 
270 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
271 	struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
272 {
273 	int retval, pkg_no, core_no, thread_no;
274 
275 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
276 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
277 			for (thread_no = 0; thread_no <
278 				topo.num_threads_per_core; ++thread_no) {
279 				struct thread_data *t;
280 				struct core_data *c;
281 				struct pkg_data *p;
282 
283 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
284 
285 				if (cpu_is_not_present(t->cpu_id))
286 					continue;
287 
288 				c = GET_CORE(core_base, core_no, pkg_no);
289 				p = GET_PKG(pkg_base, pkg_no);
290 
291 				retval = func(t, c, p);
292 				if (retval)
293 					return retval;
294 			}
295 		}
296 	}
297 	return 0;
298 }
299 
300 int cpu_migrate(int cpu)
301 {
302 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
303 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
304 	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
305 		return -1;
306 	else
307 		return 0;
308 }
309 int get_msr_fd(int cpu)
310 {
311 	char pathname[32];
312 	int fd;
313 
314 	fd = fd_percpu[cpu];
315 
316 	if (fd)
317 		return fd;
318 
319 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
320 	fd = open(pathname, O_RDONLY);
321 	if (fd < 0)
322 		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
323 
324 	fd_percpu[cpu] = fd;
325 
326 	return fd;
327 }
328 
329 int get_msr(int cpu, off_t offset, unsigned long long *msr)
330 {
331 	ssize_t retval;
332 
333 	retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
334 
335 	if (retval != sizeof *msr)
336 		err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
337 
338 	return 0;
339 }
340 
/*
 * Each string in this array is compared against the --show and --hide cmdline options.
 * Thus, strings that are proper sub-sets must follow their more specific peers.
 */
345 struct msr_counter bic[] = {
346 	{ 0x0, "Package" },
347 	{ 0x0, "Avg_MHz" },
348 	{ 0x0, "Bzy_MHz" },
349 	{ 0x0, "TSC_MHz" },
350 	{ 0x0, "IRQ" },
351 	{ 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
352 	{ 0x0, "Busy%" },
353 	{ 0x0, "CPU%c1" },
354 	{ 0x0, "CPU%c3" },
355 	{ 0x0, "CPU%c6" },
356 	{ 0x0, "CPU%c7" },
357 	{ 0x0, "ThreadC" },
358 	{ 0x0, "CoreTmp" },
359 	{ 0x0, "CoreCnt" },
360 	{ 0x0, "PkgTmp" },
361 	{ 0x0, "GFX%rc6" },
362 	{ 0x0, "GFXMHz" },
363 	{ 0x0, "Pkg%pc2" },
364 	{ 0x0, "Pkg%pc3" },
365 	{ 0x0, "Pkg%pc6" },
366 	{ 0x0, "Pkg%pc7" },
367 	{ 0x0, "Pkg%pc8" },
368 	{ 0x0, "Pkg%pc9" },
369 	{ 0x0, "Pkg%pc10" },
370 	{ 0x0, "PkgWatt" },
371 	{ 0x0, "CorWatt" },
372 	{ 0x0, "GFXWatt" },
373 	{ 0x0, "PkgCnt" },
374 	{ 0x0, "RAMWatt" },
375 	{ 0x0, "PKG_%" },
376 	{ 0x0, "RAM_%" },
377 	{ 0x0, "Pkg_J" },
378 	{ 0x0, "Cor_J" },
379 	{ 0x0, "GFX_J" },
380 	{ 0x0, "RAM_J" },
381 	{ 0x0, "Core" },
382 	{ 0x0, "CPU" },
383 	{ 0x0, "Mod%c6" },
384 	{ 0x0, "sysfs" },
385 };
386 
387 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
388 #define	BIC_Package	(1ULL << 0)
389 #define	BIC_Avg_MHz	(1ULL << 1)
390 #define	BIC_Bzy_MHz	(1ULL << 2)
391 #define	BIC_TSC_MHz	(1ULL << 3)
392 #define	BIC_IRQ		(1ULL << 4)
393 #define	BIC_SMI		(1ULL << 5)
394 #define	BIC_Busy	(1ULL << 6)
395 #define	BIC_CPU_c1	(1ULL << 7)
396 #define	BIC_CPU_c3	(1ULL << 8)
397 #define	BIC_CPU_c6	(1ULL << 9)
398 #define	BIC_CPU_c7	(1ULL << 10)
399 #define	BIC_ThreadC	(1ULL << 11)
400 #define	BIC_CoreTmp	(1ULL << 12)
401 #define	BIC_CoreCnt	(1ULL << 13)
402 #define	BIC_PkgTmp	(1ULL << 14)
403 #define	BIC_GFX_rc6	(1ULL << 15)
404 #define	BIC_GFXMHz	(1ULL << 16)
405 #define	BIC_Pkgpc2	(1ULL << 17)
406 #define	BIC_Pkgpc3	(1ULL << 18)
407 #define	BIC_Pkgpc6	(1ULL << 19)
408 #define	BIC_Pkgpc7	(1ULL << 20)
409 #define	BIC_Pkgpc8	(1ULL << 21)
410 #define	BIC_Pkgpc9	(1ULL << 22)
411 #define	BIC_Pkgpc10	(1ULL << 23)
412 #define	BIC_PkgWatt	(1ULL << 24)
413 #define	BIC_CorWatt	(1ULL << 25)
414 #define	BIC_GFXWatt	(1ULL << 26)
415 #define	BIC_PkgCnt	(1ULL << 27)
416 #define	BIC_RAMWatt	(1ULL << 28)
417 #define	BIC_PKG__	(1ULL << 29)
418 #define	BIC_RAM__	(1ULL << 30)
419 #define	BIC_Pkg_J	(1ULL << 31)
420 #define	BIC_Cor_J	(1ULL << 32)
421 #define	BIC_GFX_J	(1ULL << 33)
422 #define	BIC_RAM_J	(1ULL << 34)
423 #define	BIC_Core	(1ULL << 35)
424 #define	BIC_CPU		(1ULL << 36)
425 #define	BIC_Mod_c6	(1ULL << 37)
426 #define	BIC_sysfs	(1ULL << 38)
427 
428 unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL;
429 unsigned long long bic_present = BIC_sysfs;
430 
431 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
432 #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
433 #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
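/*
 * Sketch of the intended usage (illustrative, no new logic): a column
 * is emitted only when its bit is set in both bic_enabled (the
 * --show/--hide selection) and bic_present (what probing found), e.g.
 *
 *	if (DO_BIC(BIC_PkgTmp))
 *		outp += sprintf(outp, "\tPkgTmp");
 *
 * Probe code marks detected features with BIC_PRESENT(BIC_PkgTmp).
 */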
434 
/*
 * bic_lookup
 * for each of the strings in the comma-separated name_list,
 * set the appropriate bit in the return value.
 */
440 unsigned long long bic_lookup(char *name_list)
441 {
442 	int i;
443 	unsigned long long retval = 0;
444 
445 	while (name_list) {
446 		char *comma;
447 
448 		comma = strchr(name_list, ',');
449 
450 		if (comma)
451 			*comma = '\0';
452 
453 		for (i = 0; i < MAX_BIC; ++i) {
454 			if (!strcmp(name_list, bic[i].name)) {
455 				retval |= (1ULL << i);
456 				break;
457 			}
458 		}
459 		if (i == MAX_BIC) {
460 			fprintf(stderr, "Invalid counter name: %s\n", name_list);
461 			exit(-1);
462 		}
463 
464 		name_list = comma;
465 		if (name_list)
466 			name_list++;
467 
468 	}
469 	return retval;
470 }
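/*
 * Illustrative example: bic_lookup("Bzy_MHz,IRQ") returns
 * (1ULL << 2) | (1ULL << 4) == BIC_Bzy_MHz | BIC_IRQ, since the bit
 * position is simply the index of the matching entry in bic[] above.
 */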
471 
472 void print_header(void)
473 {
474 	struct msr_counter *mp;
475 
476 	if (DO_BIC(BIC_Package))
477 		outp += sprintf(outp, "\tPackage");
478 	if (DO_BIC(BIC_Core))
479 		outp += sprintf(outp, "\tCore");
480 	if (DO_BIC(BIC_CPU))
481 		outp += sprintf(outp, "\tCPU");
482 	if (DO_BIC(BIC_Avg_MHz))
483 		outp += sprintf(outp, "\tAvg_MHz");
484 	if (DO_BIC(BIC_Busy))
485 		outp += sprintf(outp, "\tBusy%%");
486 	if (DO_BIC(BIC_Bzy_MHz))
487 		outp += sprintf(outp, "\tBzy_MHz");
488 	if (DO_BIC(BIC_TSC_MHz))
489 		outp += sprintf(outp, "\tTSC_MHz");
490 
491 	if (DO_BIC(BIC_IRQ))
492 		outp += sprintf(outp, "\tIRQ");
493 	if (DO_BIC(BIC_SMI))
494 		outp += sprintf(outp, "\tSMI");
495 
496 	for (mp = sys.tp; mp; mp = mp->next) {
497 		if (mp->format == FORMAT_RAW) {
498 			if (mp->width == 64)
499 				outp += sprintf(outp, "\t%18.18s", mp->name);
500 			else
501 				outp += sprintf(outp, "\t%10.10s", mp->name);
502 		} else {
503 			outp += sprintf(outp, "\t%s", mp->name);
504 		}
505 	}
506 
507 	if (DO_BIC(BIC_CPU_c1))
508 		outp += sprintf(outp, "\tCPU%%c1");
509 	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates)
510 		outp += sprintf(outp, "\tCPU%%c3");
511 	if (DO_BIC(BIC_CPU_c6))
512 		outp += sprintf(outp, "\tCPU%%c6");
513 	if (DO_BIC(BIC_CPU_c7))
514 		outp += sprintf(outp, "\tCPU%%c7");
515 
516 	if (DO_BIC(BIC_Mod_c6))
517 		outp += sprintf(outp, "\tMod%%c6");
518 
519 	if (DO_BIC(BIC_CoreTmp))
520 		outp += sprintf(outp, "\tCoreTmp");
521 
522 	for (mp = sys.cp; mp; mp = mp->next) {
523 		if (mp->format == FORMAT_RAW) {
524 			if (mp->width == 64)
525 				outp += sprintf(outp, "\t%18.18s", mp->name);
526 			else
527 				outp += sprintf(outp, "\t%10.10s", mp->name);
528 		} else {
529 			outp += sprintf(outp, "\t%s", mp->name);
530 		}
531 	}
532 
533 	if (DO_BIC(BIC_PkgTmp))
534 		outp += sprintf(outp, "\tPkgTmp");
535 
536 	if (DO_BIC(BIC_GFX_rc6))
537 		outp += sprintf(outp, "\tGFX%%rc6");
538 
539 	if (DO_BIC(BIC_GFXMHz))
540 		outp += sprintf(outp, "\tGFXMHz");
541 
542 	if (do_skl_residency) {
543 		outp += sprintf(outp, "\tTotl%%C0");
544 		outp += sprintf(outp, "\tAny%%C0");
545 		outp += sprintf(outp, "\tGFX%%C0");
546 		outp += sprintf(outp, "\tCPUGFX%%");
547 	}
548 
549 	if (DO_BIC(BIC_Pkgpc2))
550 		outp += sprintf(outp, "\tPkg%%pc2");
551 	if (DO_BIC(BIC_Pkgpc3))
552 		outp += sprintf(outp, "\tPkg%%pc3");
553 	if (DO_BIC(BIC_Pkgpc6))
554 		outp += sprintf(outp, "\tPkg%%pc6");
555 	if (DO_BIC(BIC_Pkgpc7))
556 		outp += sprintf(outp, "\tPkg%%pc7");
557 	if (DO_BIC(BIC_Pkgpc8))
558 		outp += sprintf(outp, "\tPkg%%pc8");
559 	if (DO_BIC(BIC_Pkgpc9))
560 		outp += sprintf(outp, "\tPkg%%pc9");
561 	if (DO_BIC(BIC_Pkgpc10))
562 		outp += sprintf(outp, "\tPk%%pc10");
563 
564 	if (do_rapl && !rapl_joules) {
565 		if (DO_BIC(BIC_PkgWatt))
566 			outp += sprintf(outp, "\tPkgWatt");
567 		if (DO_BIC(BIC_CorWatt))
568 			outp += sprintf(outp, "\tCorWatt");
569 		if (DO_BIC(BIC_GFXWatt))
570 			outp += sprintf(outp, "\tGFXWatt");
571 		if (DO_BIC(BIC_RAMWatt))
572 			outp += sprintf(outp, "\tRAMWatt");
573 		if (DO_BIC(BIC_PKG__))
574 			outp += sprintf(outp, "\tPKG_%%");
575 		if (DO_BIC(BIC_RAM__))
576 			outp += sprintf(outp, "\tRAM_%%");
577 	} else if (do_rapl && rapl_joules) {
578 		if (DO_BIC(BIC_Pkg_J))
579 			outp += sprintf(outp, "\tPkg_J");
580 		if (DO_BIC(BIC_Cor_J))
581 			outp += sprintf(outp, "\tCor_J");
582 		if (DO_BIC(BIC_GFX_J))
583 			outp += sprintf(outp, "\tGFX_J");
584 		if (DO_BIC(BIC_RAM_J))
585 			outp += sprintf(outp, "\tRAM_J");
586 		if (DO_BIC(BIC_PKG__))
587 			outp += sprintf(outp, "\tPKG_%%");
588 		if (DO_BIC(BIC_RAM__))
589 			outp += sprintf(outp, "\tRAM_%%");
590 	}
591 	for (mp = sys.pp; mp; mp = mp->next) {
592 		if (mp->format == FORMAT_RAW) {
593 			if (mp->width == 64)
594 				outp += sprintf(outp, "\t%18.18s", mp->name);
595 			else
596 				outp += sprintf(outp, "\t%10.10s", mp->name);
597 		} else {
598 			outp += sprintf(outp, "\t%s", mp->name);
599 		}
600 	}
601 
602 	outp += sprintf(outp, "\n");
603 }
604 
605 int dump_counters(struct thread_data *t, struct core_data *c,
606 	struct pkg_data *p)
607 {
608 	int i;
609 	struct msr_counter *mp;
610 
611 	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
612 
613 	if (t) {
614 		outp += sprintf(outp, "CPU: %d flags 0x%x\n",
615 			t->cpu_id, t->flags);
616 		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
617 		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
618 		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
619 		outp += sprintf(outp, "c1: %016llX\n", t->c1);
620 
621 		if (DO_BIC(BIC_IRQ))
622 			outp += sprintf(outp, "IRQ: %08X\n", t->irq_count);
623 		if (DO_BIC(BIC_SMI))
624 			outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
625 
626 		for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
627 			outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n",
628 				i, mp->msr_num, t->counter[i]);
629 		}
630 	}
631 
632 	if (c) {
633 		outp += sprintf(outp, "core: %d\n", c->core_id);
634 		outp += sprintf(outp, "c3: %016llX\n", c->c3);
635 		outp += sprintf(outp, "c6: %016llX\n", c->c6);
636 		outp += sprintf(outp, "c7: %016llX\n", c->c7);
637 		outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
638 
639 		for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
640 			outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
641 				i, mp->msr_num, c->counter[i]);
642 		}
643 		outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us);
644 	}
645 
646 	if (p) {
647 		outp += sprintf(outp, "package: %d\n", p->package_id);
648 
649 		outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
650 		outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
651 		outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
652 		outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
653 
654 		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
655 		if (DO_BIC(BIC_Pkgpc3))
656 			outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
657 		if (DO_BIC(BIC_Pkgpc6))
658 			outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
659 		if (DO_BIC(BIC_Pkgpc7))
660 			outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
661 		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
662 		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
663 		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
664 		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
665 		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
666 		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
667 		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
668 		outp += sprintf(outp, "Throttle PKG: %0X\n",
669 			p->rapl_pkg_perf_status);
670 		outp += sprintf(outp, "Throttle RAM: %0X\n",
671 			p->rapl_dram_perf_status);
672 		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
673 
674 		for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
675 			outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n",
676 				i, mp->msr_num, p->counter[i]);
677 		}
678 	}
679 
680 	outp += sprintf(outp, "\n");
681 
682 	return 0;
683 }
684 
685 /*
686  * column formatting convention & formats
687  */
688 int format_counters(struct thread_data *t, struct core_data *c,
689 	struct pkg_data *p)
690 {
691 	double interval_float, tsc;
692 	char *fmt8;
693 	int i;
694 	struct msr_counter *mp;
695 
696 	 /* if showing only 1st thread in core and this isn't one, bail out */
697 	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
698 		return 0;
699 
700 	 /* if showing only 1st thread in pkg and this isn't one, bail out */
701 	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
702 		return 0;
703 
	/* if not summary line and --cpu is used */
705 	if ((t != &average.threads) &&
706 		(cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset)))
707 		return 0;
708 
709 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
710 
711 	tsc = t->tsc * tsc_tweak;
712 
713 	/* topo columns, print blanks on 1st (average) line */
714 	if (t == &average.threads) {
715 		if (DO_BIC(BIC_Package))
716 			outp += sprintf(outp, "\t-");
717 		if (DO_BIC(BIC_Core))
718 			outp += sprintf(outp, "\t-");
719 		if (DO_BIC(BIC_CPU))
720 			outp += sprintf(outp, "\t-");
721 	} else {
722 		if (DO_BIC(BIC_Package)) {
723 			if (p)
724 				outp += sprintf(outp, "\t%d", p->package_id);
725 			else
726 				outp += sprintf(outp, "\t-");
727 		}
728 		if (DO_BIC(BIC_Core)) {
729 			if (c)
730 				outp += sprintf(outp, "\t%d", c->core_id);
731 			else
732 				outp += sprintf(outp, "\t-");
733 		}
734 		if (DO_BIC(BIC_CPU))
735 			outp += sprintf(outp, "\t%d", t->cpu_id);
736 	}
737 
738 	if (DO_BIC(BIC_Avg_MHz))
739 		outp += sprintf(outp, "\t%.0f",
740 			1.0 / units * t->aperf / interval_float);
741 
742 	if (DO_BIC(BIC_Busy))
743 		outp += sprintf(outp, "\t%.2f", 100.0 * t->mperf/tsc);
744 
745 	if (DO_BIC(BIC_Bzy_MHz)) {
746 		if (has_base_hz)
747 			outp += sprintf(outp, "\t%.0f", base_hz / units * t->aperf / t->mperf);
748 		else
749 			outp += sprintf(outp, "\t%.0f",
750 				tsc / units * t->aperf / t->mperf / interval_float);
751 	}
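	/*
	 * Rough sketch of the arithmetic above (illustrative numbers):
	 * with units == 1000000, an APERF delta of 3.0e9 over a 2.0 s
	 * interval prints Avg_MHz = 3.0e9 / 1e6 / 2.0 = 1500.
	 * Busy% is 100 * MPERF/TSC, and Bzy_MHz scales the TSC (or
	 * base_hz) clock by APERF/MPERF, i.e. average MHz while not halted.
	 */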
752 
753 	if (DO_BIC(BIC_TSC_MHz))
754 		outp += sprintf(outp, "\t%.0f", 1.0 * t->tsc/units/interval_float);
755 
756 	/* IRQ */
757 	if (DO_BIC(BIC_IRQ))
758 		outp += sprintf(outp, "\t%d", t->irq_count);
759 
760 	/* SMI */
761 	if (DO_BIC(BIC_SMI))
762 		outp += sprintf(outp, "\t%d", t->smi_count);
763 
764 	/* Added counters */
765 	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
766 		if (mp->format == FORMAT_RAW) {
767 			if (mp->width == 32)
768 				outp += sprintf(outp, "\t0x%08lx", (unsigned long) t->counter[i]);
769 			else
770 				outp += sprintf(outp, "\t0x%016llx", t->counter[i]);
771 		} else if (mp->format == FORMAT_DELTA) {
772 			outp += sprintf(outp, "\t%lld", t->counter[i]);
773 		} else if (mp->format == FORMAT_PERCENT) {
774 			if (mp->type == COUNTER_USEC)
775 				outp += sprintf(outp, "\t%.2f", t->counter[i]/interval_float/10000);
776 			else
777 				outp += sprintf(outp, "\t%.2f", 100.0 * t->counter[i]/tsc);
778 		}
779 	}
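	/*
	 * Scaling note for the COUNTER_USEC case above: the sysfs value
	 * is in microseconds, so percent-of-interval is
	 * usec / (interval_seconds * 1e6) * 100 == usec / interval / 10000.
	 */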
780 
781 	/* C1 */
782 	if (DO_BIC(BIC_CPU_c1))
783 		outp += sprintf(outp, "\t%.2f", 100.0 * t->c1/tsc);
784 
785 
786 	/* print per-core data only for 1st thread in core */
787 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
788 		goto done;
789 
790 	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates)
791 		outp += sprintf(outp, "\t%.2f", 100.0 * c->c3/tsc);
792 	if (DO_BIC(BIC_CPU_c6))
793 		outp += sprintf(outp, "\t%.2f", 100.0 * c->c6/tsc);
794 	if (DO_BIC(BIC_CPU_c7))
795 		outp += sprintf(outp, "\t%.2f", 100.0 * c->c7/tsc);
796 
797 	/* Mod%c6 */
798 	if (DO_BIC(BIC_Mod_c6))
799 		outp += sprintf(outp, "\t%.2f", 100.0 * c->mc6_us / tsc);
800 
801 	if (DO_BIC(BIC_CoreTmp))
802 		outp += sprintf(outp, "\t%d", c->core_temp_c);
803 
804 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
805 		if (mp->format == FORMAT_RAW) {
806 			if (mp->width == 32)
807 				outp += sprintf(outp, "\t0x%08lx", (unsigned long) c->counter[i]);
808 			else
809 				outp += sprintf(outp, "\t0x%016llx", c->counter[i]);
810 		} else if (mp->format == FORMAT_DELTA) {
811 			outp += sprintf(outp, "\t%lld", c->counter[i]);
812 		} else if (mp->format == FORMAT_PERCENT) {
813 			outp += sprintf(outp, "\t%.2f", 100.0 * c->counter[i]/tsc);
814 		}
815 	}
816 
817 	/* print per-package data only for 1st core in package */
818 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
819 		goto done;
820 
821 	/* PkgTmp */
822 	if (DO_BIC(BIC_PkgTmp))
823 		outp += sprintf(outp, "\t%d", p->pkg_temp_c);
824 
825 	/* GFXrc6 */
826 	if (DO_BIC(BIC_GFX_rc6)) {
827 		if (p->gfx_rc6_ms == -1) {	/* detect GFX counter reset */
828 			outp += sprintf(outp, "\t**.**");
829 		} else {
830 			outp += sprintf(outp, "\t%.2f",
831 				p->gfx_rc6_ms / 10.0 / interval_float);
832 		}
833 	}
834 
835 	/* GFXMHz */
836 	if (DO_BIC(BIC_GFXMHz))
837 		outp += sprintf(outp, "\t%d", p->gfx_mhz);
838 
	/* Totl%C0, Any%C0, GFX%C0, CPUGFX% */
840 	if (do_skl_residency) {
841 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_wtd_core_c0/tsc);
842 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_core_c0/tsc);
843 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_gfxe_c0/tsc);
844 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_both_core_gfxe_c0/tsc);
845 	}
846 
847 	if (DO_BIC(BIC_Pkgpc2))
848 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc2/tsc);
849 	if (DO_BIC(BIC_Pkgpc3))
850 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc3/tsc);
851 	if (DO_BIC(BIC_Pkgpc6))
852 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc6/tsc);
853 	if (DO_BIC(BIC_Pkgpc7))
854 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc7/tsc);
855 	if (DO_BIC(BIC_Pkgpc8))
856 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc8/tsc);
857 	if (DO_BIC(BIC_Pkgpc9))
858 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc9/tsc);
859 	if (DO_BIC(BIC_Pkgpc10))
860 		outp += sprintf(outp, "\t%.2f", 100.0 * p->pc10/tsc);
861 
	/*
	 * If measurement interval exceeds minimum RAPL Joule Counter range,
	 * indicate that results are suspect by printing "**" in fraction place.
	 */
866 	if (interval_float < rapl_joule_counter_range)
867 		fmt8 = "\t%.2f";
868 	else
869 		fmt8 = "%6.0f**";
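	/*
	 * Illustrative numbers only: with rapl_energy_units of 1/65536 J
	 * (a common value), the 32-bit energy counter wraps after about
	 * 65536 J, roughly 655 s at 100 W.  rapl_joule_counter_range is
	 * sized along those lines, so longer intervals make the
	 * per-interval Watts suspect, hence the "**" format.
	 */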
870 
871 	if (DO_BIC(BIC_PkgWatt))
872 		outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
873 	if (DO_BIC(BIC_CorWatt))
874 		outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
875 	if (DO_BIC(BIC_GFXWatt))
876 		outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
877 	if (DO_BIC(BIC_RAMWatt))
878 		outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
879 	if (DO_BIC(BIC_Pkg_J))
880 		outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units);
881 	if (DO_BIC(BIC_Cor_J))
882 		outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units);
883 	if (DO_BIC(BIC_GFX_J))
884 		outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units);
885 	if (DO_BIC(BIC_RAM_J))
886 		outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units);
887 	if (DO_BIC(BIC_PKG__))
888 		outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
889 	if (DO_BIC(BIC_RAM__))
890 		outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
891 
892 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
893 		if (mp->format == FORMAT_RAW) {
894 			if (mp->width == 32)
895 				outp += sprintf(outp, "\t0x%08lx", (unsigned long) p->counter[i]);
896 			else
897 				outp += sprintf(outp, "\t0x%016llx", p->counter[i]);
898 		} else if (mp->format == FORMAT_DELTA) {
899 			outp += sprintf(outp, "\t%lld", p->counter[i]);
900 		} else if (mp->format == FORMAT_PERCENT) {
901 			outp += sprintf(outp, "\t%.2f", 100.0 * p->counter[i]/tsc);
902 		}
903 	}
904 
905 done:
906 	outp += sprintf(outp, "\n");
907 
908 	return 0;
909 }
910 
911 void flush_output_stdout(void)
912 {
913 	FILE *filep;
914 
915 	if (outf == stderr)
916 		filep = stdout;
917 	else
918 		filep = outf;
919 
920 	fputs(output_buffer, filep);
921 	fflush(filep);
922 
923 	outp = output_buffer;
924 }
925 void flush_output_stderr(void)
926 {
927 	fputs(output_buffer, outf);
928 	fflush(outf);
929 	outp = output_buffer;
930 }
931 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
932 {
933 	static int printed;
934 
935 	if (!printed || !summary_only)
936 		print_header();
937 
938 	if (topo.num_cpus > 1)
939 		format_counters(&average.threads, &average.cores,
940 			&average.packages);
941 
942 	printed = 1;
943 
944 	if (summary_only)
945 		return;
946 
947 	for_all_cpus(format_counters, t, c, p);
948 }
949 
#define DELTA_WRAP32(new, old)				\
	do {						\
		if ((new) > (old)) {			\
			(old) = (new) - (old);		\
		} else {				\
			(old) = 0x100000000 + (new) - (old);	\
		}					\
	} while (0)
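/*
 * Example of the 32-bit wrap handling above (illustrative values): if
 * the previous sample was 0xFFFFFF00 and the counter wrapped to
 * 0x00000100, the reported delta is
 * 0x100000000 + 0x00000100 - 0xFFFFFF00 = 0x200.
 */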
956 
957 int
958 delta_package(struct pkg_data *new, struct pkg_data *old)
959 {
960 	int i;
961 	struct msr_counter *mp;
962 
963 	if (do_skl_residency) {
964 		old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
965 		old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
966 		old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
967 		old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
968 	}
969 	old->pc2 = new->pc2 - old->pc2;
970 	if (DO_BIC(BIC_Pkgpc3))
971 		old->pc3 = new->pc3 - old->pc3;
972 	if (DO_BIC(BIC_Pkgpc6))
973 		old->pc6 = new->pc6 - old->pc6;
974 	if (DO_BIC(BIC_Pkgpc7))
975 		old->pc7 = new->pc7 - old->pc7;
976 	old->pc8 = new->pc8 - old->pc8;
977 	old->pc9 = new->pc9 - old->pc9;
978 	old->pc10 = new->pc10 - old->pc10;
979 	old->pkg_temp_c = new->pkg_temp_c;
980 
981 	/* flag an error when rc6 counter resets/wraps */
982 	if (old->gfx_rc6_ms >  new->gfx_rc6_ms)
983 		old->gfx_rc6_ms = -1;
984 	else
985 		old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
986 
987 	old->gfx_mhz = new->gfx_mhz;
988 
989 	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
990 	DELTA_WRAP32(new->energy_cores, old->energy_cores);
991 	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
992 	DELTA_WRAP32(new->energy_dram, old->energy_dram);
993 	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
994 	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
995 
996 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
997 		if (mp->format == FORMAT_RAW)
998 			old->counter[i] = new->counter[i];
999 		else
1000 			old->counter[i] = new->counter[i] - old->counter[i];
1001 	}
1002 
1003 	return 0;
1004 }
1005 
1006 void
1007 delta_core(struct core_data *new, struct core_data *old)
1008 {
1009 	int i;
1010 	struct msr_counter *mp;
1011 
1012 	old->c3 = new->c3 - old->c3;
1013 	old->c6 = new->c6 - old->c6;
1014 	old->c7 = new->c7 - old->c7;
1015 	old->core_temp_c = new->core_temp_c;
1016 	old->mc6_us = new->mc6_us - old->mc6_us;
1017 
1018 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1019 		if (mp->format == FORMAT_RAW)
1020 			old->counter[i] = new->counter[i];
1021 		else
1022 			old->counter[i] = new->counter[i] - old->counter[i];
1023 	}
1024 }
1025 
1026 /*
1027  * old = new - old
1028  */
1029 int
1030 delta_thread(struct thread_data *new, struct thread_data *old,
1031 	struct core_data *core_delta)
1032 {
1033 	int i;
1034 	struct msr_counter *mp;
1035 
1036 	old->tsc = new->tsc - old->tsc;
1037 
1038 	/* check for TSC < 1 Mcycles over interval */
1039 	if (old->tsc < (1000 * 1000))
1040 		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
1041 		     "You can disable all c-states by booting with \"idle=poll\"\n"
1042 		     "or just the deep ones with \"processor.max_cstate=1\"");
1043 
1044 	old->c1 = new->c1 - old->c1;
1045 
1046 	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
1047 		if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1048 			old->aperf = new->aperf - old->aperf;
1049 			old->mperf = new->mperf - old->mperf;
1050 		} else {
1051 			return -1;
1052 		}
1053 	}
1054 
1055 
1056 	if (use_c1_residency_msr) {
1057 		/*
1058 		 * Some models have a dedicated C1 residency MSR,
1059 		 * which should be more accurate than the derivation below.
1060 		 */
1061 	} else {
1062 		/*
1063 		 * As counter collection is not atomic,
1064 		 * it is possible for mperf's non-halted cycles + idle states
1065 		 * to exceed TSC's all cycles: show c1 = 0% in that case.
1066 		 */
1067 		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
1068 			old->c1 = 0;
1069 		else {
1070 			/* normal case, derive c1 */
1071 			old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3
1072 				- core_delta->c6 - core_delta->c7;
1073 		}
1074 	}
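	/*
	 * Illustrative numbers for the derivation above: a TSC delta of
	 * 10.0e9 cycles (tsc_tweak == 1.0), MPERF delta of 4.0e9 and
	 * core C3+C6+C7 residency totalling 5.0e9 yield
	 * c1 = 10.0e9 - 4.0e9 - 5.0e9 = 1.0e9 cycles, i.e. ~10% C1.
	 */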
1075 
1076 	if (old->mperf == 0) {
1077 		if (debug > 1)
1078 			fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id);
1079 		old->mperf = 1;	/* divide by 0 protection */
1080 	}
1081 
1082 	if (DO_BIC(BIC_IRQ))
1083 		old->irq_count = new->irq_count - old->irq_count;
1084 
1085 	if (DO_BIC(BIC_SMI))
1086 		old->smi_count = new->smi_count - old->smi_count;
1087 
1088 	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1089 		if (mp->format == FORMAT_RAW)
1090 			old->counter[i] = new->counter[i];
1091 		else
1092 			old->counter[i] = new->counter[i] - old->counter[i];
1093 	}
1094 	return 0;
1095 }
1096 
1097 int delta_cpu(struct thread_data *t, struct core_data *c,
1098 	struct pkg_data *p, struct thread_data *t2,
1099 	struct core_data *c2, struct pkg_data *p2)
1100 {
1101 	int retval = 0;
1102 
1103 	/* calculate core delta only for 1st thread in core */
1104 	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
1105 		delta_core(c, c2);
1106 
1107 	/* always calculate thread delta */
1108 	retval = delta_thread(t, t2, c2);	/* c2 is core delta */
1109 	if (retval)
1110 		return retval;
1111 
1112 	/* calculate package delta only for 1st core in package */
1113 	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
1114 		retval = delta_package(p, p2);
1115 
1116 	return retval;
1117 }
1118 
1119 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1120 {
1121 	int i;
1122 	struct msr_counter  *mp;
1123 
1124 	t->tsc = 0;
1125 	t->aperf = 0;
1126 	t->mperf = 0;
1127 	t->c1 = 0;
1128 
1129 	t->irq_count = 0;
1130 	t->smi_count = 0;
1131 
1132 	/* tells format_counters to dump all fields from this set */
1133 	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
1134 
1135 	c->c3 = 0;
1136 	c->c6 = 0;
1137 	c->c7 = 0;
1138 	c->mc6_us = 0;
1139 	c->core_temp_c = 0;
1140 
1141 	p->pkg_wtd_core_c0 = 0;
1142 	p->pkg_any_core_c0 = 0;
1143 	p->pkg_any_gfxe_c0 = 0;
1144 	p->pkg_both_core_gfxe_c0 = 0;
1145 
1146 	p->pc2 = 0;
1147 	if (DO_BIC(BIC_Pkgpc3))
1148 		p->pc3 = 0;
1149 	if (DO_BIC(BIC_Pkgpc6))
1150 		p->pc6 = 0;
1151 	if (DO_BIC(BIC_Pkgpc7))
1152 		p->pc7 = 0;
1153 	p->pc8 = 0;
1154 	p->pc9 = 0;
1155 	p->pc10 = 0;
1156 
1157 	p->energy_pkg = 0;
1158 	p->energy_dram = 0;
1159 	p->energy_cores = 0;
1160 	p->energy_gfx = 0;
1161 	p->rapl_pkg_perf_status = 0;
1162 	p->rapl_dram_perf_status = 0;
1163 	p->pkg_temp_c = 0;
1164 
1165 	p->gfx_rc6_ms = 0;
1166 	p->gfx_mhz = 0;
1167 	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
1168 		t->counter[i] = 0;
1169 
1170 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next)
1171 		c->counter[i] = 0;
1172 
1173 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next)
1174 		p->counter[i] = 0;
1175 }
1176 int sum_counters(struct thread_data *t, struct core_data *c,
1177 	struct pkg_data *p)
1178 {
1179 	int i;
1180 	struct msr_counter *mp;
1181 
1182 	average.threads.tsc += t->tsc;
1183 	average.threads.aperf += t->aperf;
1184 	average.threads.mperf += t->mperf;
1185 	average.threads.c1 += t->c1;
1186 
1187 	average.threads.irq_count += t->irq_count;
1188 	average.threads.smi_count += t->smi_count;
1189 
1190 	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1191 		if (mp->format == FORMAT_RAW)
1192 			continue;
1193 		average.threads.counter[i] += t->counter[i];
1194 	}
1195 
1196 	/* sum per-core values only for 1st thread in core */
1197 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1198 		return 0;
1199 
1200 	average.cores.c3 += c->c3;
1201 	average.cores.c6 += c->c6;
1202 	average.cores.c7 += c->c7;
1203 	average.cores.mc6_us += c->mc6_us;
1204 
1205 	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
1206 
1207 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1208 		if (mp->format == FORMAT_RAW)
1209 			continue;
1210 		average.cores.counter[i] += c->counter[i];
1211 	}
1212 
1213 	/* sum per-pkg values only for 1st core in pkg */
1214 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1215 		return 0;
1216 
1217 	if (do_skl_residency) {
1218 		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
1219 		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
1220 		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
1221 		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
1222 	}
1223 
1224 	average.packages.pc2 += p->pc2;
1225 	if (DO_BIC(BIC_Pkgpc3))
1226 		average.packages.pc3 += p->pc3;
1227 	if (DO_BIC(BIC_Pkgpc6))
1228 		average.packages.pc6 += p->pc6;
1229 	if (DO_BIC(BIC_Pkgpc7))
1230 		average.packages.pc7 += p->pc7;
1231 	average.packages.pc8 += p->pc8;
1232 	average.packages.pc9 += p->pc9;
1233 	average.packages.pc10 += p->pc10;
1234 
1235 	average.packages.energy_pkg += p->energy_pkg;
1236 	average.packages.energy_dram += p->energy_dram;
1237 	average.packages.energy_cores += p->energy_cores;
1238 	average.packages.energy_gfx += p->energy_gfx;
1239 
1240 	average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
1241 	average.packages.gfx_mhz = p->gfx_mhz;
1242 
1243 	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
1244 
1245 	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
1246 	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
1247 
1248 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1249 		if (mp->format == FORMAT_RAW)
1250 			continue;
1251 		average.packages.counter[i] += p->counter[i];
1252 	}
1253 	return 0;
1254 }
1255 /*
1256  * sum the counters for all cpus in the system
1257  * compute the weighted average
1258  */
1259 void compute_average(struct thread_data *t, struct core_data *c,
1260 	struct pkg_data *p)
1261 {
1262 	int i;
1263 	struct msr_counter *mp;
1264 
1265 	clear_counters(&average.threads, &average.cores, &average.packages);
1266 
1267 	for_all_cpus(sum_counters, t, c, p);
1268 
1269 	average.threads.tsc /= topo.num_cpus;
1270 	average.threads.aperf /= topo.num_cpus;
1271 	average.threads.mperf /= topo.num_cpus;
1272 	average.threads.c1 /= topo.num_cpus;
1273 
1274 	average.cores.c3 /= topo.num_cores;
1275 	average.cores.c6 /= topo.num_cores;
1276 	average.cores.c7 /= topo.num_cores;
1277 	average.cores.mc6_us /= topo.num_cores;
1278 
1279 	if (do_skl_residency) {
1280 		average.packages.pkg_wtd_core_c0 /= topo.num_packages;
1281 		average.packages.pkg_any_core_c0 /= topo.num_packages;
1282 		average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
1283 		average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
1284 	}
1285 
1286 	average.packages.pc2 /= topo.num_packages;
1287 	if (DO_BIC(BIC_Pkgpc3))
1288 		average.packages.pc3 /= topo.num_packages;
1289 	if (DO_BIC(BIC_Pkgpc6))
1290 		average.packages.pc6 /= topo.num_packages;
1291 	if (DO_BIC(BIC_Pkgpc7))
1292 		average.packages.pc7 /= topo.num_packages;
1293 
1294 	average.packages.pc8 /= topo.num_packages;
1295 	average.packages.pc9 /= topo.num_packages;
1296 	average.packages.pc10 /= topo.num_packages;
1297 
1298 	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1299 		if (mp->format == FORMAT_RAW)
1300 			continue;
		if ((mp->flags & SYSFS_PERCPU) && mp->type == COUNTER_ITEMS)
1302 			continue;
1303 		average.threads.counter[i] /= topo.num_cpus;
1304 	}
1305 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1306 		if (mp->format == FORMAT_RAW)
1307 			continue;
1308 		average.cores.counter[i] /= topo.num_cores;
1309 	}
1310 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1311 		if (mp->format == FORMAT_RAW)
1312 			continue;
1313 		average.packages.counter[i] /= topo.num_packages;
1314 	}
1315 }
1316 
1317 static unsigned long long rdtsc(void)
1318 {
1319 	unsigned int low, high;
1320 
1321 	asm volatile("rdtsc" : "=a" (low), "=d" (high));
1322 
1323 	return low | ((unsigned long long)high) << 32;
1324 }
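/*
 * (RDTSC returns the 64-bit time-stamp counter split across EDX:EAX;
 * the shift/OR above reassembles it into a single 64-bit value.)
 */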
1325 
1326 /*
1327  * Open a file, and exit on failure
1328  */
1329 FILE *fopen_or_die(const char *path, const char *mode)
1330 {
1331 	FILE *filep = fopen(path, mode);
1332 
1333 	if (!filep)
1334 		err(1, "%s: open failed", path);
1335 	return filep;
1336 }
1337 /*
1338  * snapshot_sysfs_counter()
1339  *
1340  * return snapshot of given counter
1341  */
1342 unsigned long long snapshot_sysfs_counter(char *path)
1343 {
1344 	FILE *fp;
1345 	int retval;
1346 	unsigned long long counter;
1347 
1348 	fp = fopen_or_die(path, "r");
1349 
	retval = fscanf(fp, "%llu", &counter);
1351 	if (retval != 1)
1352 		err(1, "snapshot_sysfs_counter(%s)", path);
1353 
1354 	fclose(fp);
1355 
1356 	return counter;
1357 }
1358 
1359 int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
1360 {
1361 	if (mp->msr_num != 0) {
1362 		if (get_msr(cpu, mp->msr_num, counterp))
1363 			return -1;
1364 	} else {
1365 		char path[128];
1366 
1367 		if (mp->flags & SYSFS_PERCPU) {
1368 			sprintf(path, "/sys/devices/system/cpu/cpu%d/%s",
1369 				 cpu, mp->path);
1370 
1371 			*counterp = snapshot_sysfs_counter(path);
1372 		} else {
1373 			*counterp = snapshot_sysfs_counter(mp->path);
1374 		}
1375 	}
1376 
1377 	return 0;
1378 }
1379 
1380 /*
1381  * get_counters(...)
1382  * migrate to cpu
1383  * acquire and record local counters for that cpu
1384  */
1385 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1386 {
1387 	int cpu = t->cpu_id;
1388 	unsigned long long msr;
1389 	int aperf_mperf_retry_count = 0;
1390 	struct msr_counter *mp;
1391 	int i;
1392 
1393 	if (cpu_migrate(cpu)) {
1394 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1395 		return -1;
1396 	}
1397 
1398 retry:
1399 	t->tsc = rdtsc();	/* we are running on local CPU of interest */
1400 
1401 	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
1402 		unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1403 
		/*
		 * The TSC, APERF and MPERF must be read together for
		 * APERF/MPERF and MPERF/TSC to give accurate results.
		 *
		 * Unfortunately, APERF and MPERF are each read via a
		 * separate system call, so delays may occur between
		 * them.  If the time to read them varies by a large
		 * amount, we re-read them.
		 */
1413 
1414 		/*
1415 		 * This initial dummy APERF read has been seen to
1416 		 * reduce jitter in the subsequent reads.
1417 		 */
1418 
1419 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1420 			return -3;
1421 
1422 		t->tsc = rdtsc();	/* re-read close to APERF */
1423 
1424 		tsc_before = t->tsc;
1425 
1426 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1427 			return -3;
1428 
1429 		tsc_between = rdtsc();
1430 
1431 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
1432 			return -4;
1433 
1434 		tsc_after = rdtsc();
1435 
1436 		aperf_time = tsc_between - tsc_before;
1437 		mperf_time = tsc_after - tsc_between;
1438 
		/*
		 * If the system call latencies to read APERF and MPERF
		 * differ by more than 2x, then try again.
		 */
1443 		if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) {
1444 			aperf_mperf_retry_count++;
1445 			if (aperf_mperf_retry_count < 5)
1446 				goto retry;
1447 			else
1448 				warnx("cpu%d jitter %lld %lld",
1449 					cpu, aperf_time, mperf_time);
1450 		}
1451 		aperf_mperf_retry_count = 0;
1452 
1453 		t->aperf = t->aperf * aperf_mperf_multiplier;
1454 		t->mperf = t->mperf * aperf_mperf_multiplier;
1455 	}
1456 
1457 	if (DO_BIC(BIC_IRQ))
1458 		t->irq_count = irqs_per_cpu[cpu];
1459 	if (DO_BIC(BIC_SMI)) {
1460 		if (get_msr(cpu, MSR_SMI_COUNT, &msr))
1461 			return -5;
1462 		t->smi_count = msr & 0xFFFFFFFF;
1463 	}
1464 	if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) {
1465 		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1466 			return -6;
1467 	}
1468 
1469 	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1470 		if (get_mp(cpu, mp, &t->counter[i]))
1471 			return -10;
1472 	}
1473 
1474 	/* collect core counters only for 1st thread in core */
1475 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1476 		return 0;
1477 
1478 	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) {
1479 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1480 			return -6;
1481 	}
1482 
1483 	if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) {
1484 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1485 			return -7;
1486 	} else if (do_knl_cstates) {
1487 		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1488 			return -7;
1489 	}
1490 
1491 	if (DO_BIC(BIC_CPU_c7))
1492 		if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1493 			return -8;
1494 
1495 	if (DO_BIC(BIC_Mod_c6))
1496 		if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us))
1497 			return -8;
1498 
1499 	if (DO_BIC(BIC_CoreTmp)) {
1500 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1501 			return -9;
1502 		c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1503 	}
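	/*
	 * Example of the DTS conversion above (hypothetical readings):
	 * with tcc_activation_temp = 100 and a Digital Readout of 28 in
	 * THERM_STATUS bits 22:16, the core is reported at 100 - 28 = 72 C.
	 */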
1504 
1505 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1506 		if (get_mp(cpu, mp, &c->counter[i]))
1507 			return -10;
1508 	}
1509 
1510 	/* collect package counters only for 1st core in package */
1511 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1512 		return 0;
1513 
1514 	if (do_skl_residency) {
1515 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1516 			return -10;
1517 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1518 			return -11;
1519 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1520 			return -12;
1521 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1522 			return -13;
1523 	}
1524 	if (DO_BIC(BIC_Pkgpc3))
1525 		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1526 			return -9;
1527 	if (DO_BIC(BIC_Pkgpc6)) {
1528 		if (do_slm_cstates) {
1529 			if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6))
1530 				return -10;
1531 		} else {
1532 			if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1533 				return -10;
1534 		}
1535 	}
1536 
1537 	if (DO_BIC(BIC_Pkgpc2))
1538 		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1539 			return -11;
1540 	if (DO_BIC(BIC_Pkgpc7))
1541 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1542 			return -12;
1543 	if (DO_BIC(BIC_Pkgpc8))
1544 		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1545 			return -13;
1546 	if (DO_BIC(BIC_Pkgpc9))
1547 		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1548 			return -13;
1549 	if (DO_BIC(BIC_Pkgpc10))
1550 		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1551 			return -13;
1552 
1553 	if (do_rapl & RAPL_PKG) {
1554 		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1555 			return -13;
1556 		p->energy_pkg = msr & 0xFFFFFFFF;
1557 	}
1558 	if (do_rapl & RAPL_CORES_ENERGY_STATUS) {
1559 		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1560 			return -14;
1561 		p->energy_cores = msr & 0xFFFFFFFF;
1562 	}
1563 	if (do_rapl & RAPL_DRAM) {
1564 		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1565 			return -15;
1566 		p->energy_dram = msr & 0xFFFFFFFF;
1567 	}
1568 	if (do_rapl & RAPL_GFX) {
1569 		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1570 			return -16;
1571 		p->energy_gfx = msr & 0xFFFFFFFF;
1572 	}
1573 	if (do_rapl & RAPL_PKG_PERF_STATUS) {
1574 		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1575 			return -16;
1576 		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1577 	}
1578 	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1579 		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1580 			return -16;
1581 		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1582 	}
1583 	if (DO_BIC(BIC_PkgTmp)) {
1584 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1585 			return -17;
1586 		p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1587 	}
1588 
1589 	if (DO_BIC(BIC_GFX_rc6))
1590 		p->gfx_rc6_ms = gfx_cur_rc6_ms;
1591 
1592 	if (DO_BIC(BIC_GFXMHz))
1593 		p->gfx_mhz = gfx_cur_mhz;
1594 
1595 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1596 		if (get_mp(cpu, mp, &p->counter[i]))
1597 			return -10;
1598 	}
1599 
1600 	return 0;
1601 }
1602 
1603 /*
1604  * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1605  * If you change the values, note they are used both in comparisons
1606  * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1607  */
1608 
1609 #define PCLUKN 0 /* Unknown */
1610 #define PCLRSV 1 /* Reserved */
1611 #define PCL__0 2 /* PC0 */
1612 #define PCL__1 3 /* PC1 */
1613 #define PCL__2 4 /* PC2 */
1614 #define PCL__3 5 /* PC3 */
1615 #define PCL__4 6 /* PC4 */
1616 #define PCL__6 7 /* PC6 */
1617 #define PCL_6N 8 /* PC6 No Retention */
1618 #define PCL_6R 9 /* PC6 Retention */
1619 #define PCL__7 10 /* PC7 */
1620 #define PCL_7S 11 /* PC7 Shrink */
1621 #define PCL__8 12 /* PC8 */
1622 #define PCL__9 13 /* PC9 */
1623 #define PCLUNL 14 /* Unlimited */
1624 
1625 int pkg_cstate_limit = PCLUKN;
char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
	"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1628 
1629 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1630 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1631 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1632 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
1633 int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1634 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1635 int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1636 int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
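/*
 * Sketch of how these tables are indexed (assuming, per the decode
 * comment above, that the low nibble of MSR_PKG_CST_CONFIG_CONTROL
 * selects the limit): on a Haswell part a raw field value of 0x3 gives
 * hsw_pkg_cstate_limits[3] == PCL__6, which prints as
 * pkg_cstate_limit_strings[PCL__6], i.e. "pc6".
 */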
1637 
1638 
1639 static void
calculate_tsc_tweak(void)
1641 {
1642 	tsc_tweak = base_hz / tsc_hz;
1643 }
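/*
 * For example (illustrative values): base_hz = 2.4e9 and tsc_hz =
 * 2.3904e9 give tsc_tweak ~= 1.004; format_counters() multiplies each
 * measured TSC delta by this factor before using it as the denominator
 * of the %-residency columns.
 */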
1644 
1645 static void
1646 dump_nhm_platform_info(void)
1647 {
1648 	unsigned long long msr;
1649 	unsigned int ratio;
1650 
1651 	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
1652 
1653 	fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1654 
1655 	ratio = (msr >> 40) & 0xFF;
1656 	fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n",
1657 		ratio, bclk, ratio * bclk);
1658 
1659 	ratio = (msr >> 8) & 0xFF;
1660 	fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n",
1661 		ratio, bclk, ratio * bclk);
1662 
1663 	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1664 	fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1665 		base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
1666 
1667 	return;
1668 }
1669 
1670 static void
1671 dump_hsw_turbo_ratio_limits(void)
1672 {
1673 	unsigned long long msr;
1674 	unsigned int ratio;
1675 
1676 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1677 
1678 	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
1679 
1680 	ratio = (msr >> 8) & 0xFF;
1681 	if (ratio)
1682 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n",
1683 			ratio, bclk, ratio * bclk);
1684 
1685 	ratio = (msr >> 0) & 0xFF;
1686 	if (ratio)
1687 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n",
1688 			ratio, bclk, ratio * bclk);
1689 	return;
1690 }
1691 
1692 static void
1693 dump_ivt_turbo_ratio_limits(void)
1694 {
1695 	unsigned long long msr;
1696 	unsigned int ratio;
1697 
1698 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1699 
1700 	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
1701 
1702 	ratio = (msr >> 56) & 0xFF;
1703 	if (ratio)
1704 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n",
1705 			ratio, bclk, ratio * bclk);
1706 
1707 	ratio = (msr >> 48) & 0xFF;
1708 	if (ratio)
1709 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n",
1710 			ratio, bclk, ratio * bclk);
1711 
1712 	ratio = (msr >> 40) & 0xFF;
1713 	if (ratio)
1714 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n",
1715 			ratio, bclk, ratio * bclk);
1716 
1717 	ratio = (msr >> 32) & 0xFF;
1718 	if (ratio)
1719 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n",
1720 			ratio, bclk, ratio * bclk);
1721 
1722 	ratio = (msr >> 24) & 0xFF;
1723 	if (ratio)
1724 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n",
1725 			ratio, bclk, ratio * bclk);
1726 
1727 	ratio = (msr >> 16) & 0xFF;
1728 	if (ratio)
1729 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n",
1730 			ratio, bclk, ratio * bclk);
1731 
1732 	ratio = (msr >> 8) & 0xFF;
1733 	if (ratio)
1734 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n",
1735 			ratio, bclk, ratio * bclk);
1736 
1737 	ratio = (msr >> 0) & 0xFF;
1738 	if (ratio)
1739 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n",
1740 			ratio, bclk, ratio * bclk);
1741 	return;
1742 }
1743 int has_turbo_ratio_group_limits(int family, int model)
1744 {
1745 
1746 	if (!genuine_intel)
1747 		return 0;
1748 
1749 	switch (model) {
1750 	case INTEL_FAM6_ATOM_GOLDMONT:
1751 	case INTEL_FAM6_SKYLAKE_X:
1752 	case INTEL_FAM6_ATOM_DENVERTON:
1753 		return 1;
1754 	}
1755 	return 0;
1756 }
1757 
1758 static void
1759 dump_turbo_ratio_limits(int family, int model)
1760 {
1761 	unsigned long long msr, core_counts;
1762 	unsigned int ratio, group_size;
1763 
1764 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1765 	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
1766 
1767 	if (has_turbo_ratio_group_limits(family, model)) {
1768 		get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
1769 		fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts);
1770 	} else {
1771 		core_counts = 0x0807060504030201;
1772 	}
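	/*
	 * Illustrative decoding: each byte of core_counts holds the number
	 * of active cores to which the corresponding ratio byte of
	 * MSR_TURBO_RATIO_LIMIT applies, so the default 0x0807060504030201
	 * means "ratio byte N applies to N+1 active cores" (1 through 8).
	 */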
1773 
1774 	ratio = (msr >> 56) & 0xFF;
1775 	group_size = (core_counts >> 56) & 0xFF;
1776 	if (ratio)
1777 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1778 			ratio, bclk, ratio * bclk, group_size);
1779 
1780 	ratio = (msr >> 48) & 0xFF;
1781 	group_size = (core_counts >> 48) & 0xFF;
1782 	if (ratio)
1783 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1784 			ratio, bclk, ratio * bclk, group_size);
1785 
1786 	ratio = (msr >> 40) & 0xFF;
1787 	group_size = (core_counts >> 40) & 0xFF;
1788 	if (ratio)
1789 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1790 			ratio, bclk, ratio * bclk, group_size);
1791 
1792 	ratio = (msr >> 32) & 0xFF;
1793 	group_size = (core_counts >> 32) & 0xFF;
1794 	if (ratio)
1795 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1796 			ratio, bclk, ratio * bclk, group_size);
1797 
1798 	ratio = (msr >> 24) & 0xFF;
1799 	group_size = (core_counts >> 24) & 0xFF;
1800 	if (ratio)
1801 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1802 			ratio, bclk, ratio * bclk, group_size);
1803 
1804 	ratio = (msr >> 16) & 0xFF;
1805 	group_size = (core_counts >> 16) & 0xFF;
1806 	if (ratio)
1807 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1808 			ratio, bclk, ratio * bclk, group_size);
1809 
1810 	ratio = (msr >> 8) & 0xFF;
1811 	group_size = (core_counts >> 8) & 0xFF;
1812 	if (ratio)
1813 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1814 			ratio, bclk, ratio * bclk, group_size);
1815 
1816 	ratio = (msr >> 0) & 0xFF;
1817 	group_size = (core_counts >> 0) & 0xFF;
1818 	if (ratio)
1819 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1820 			ratio, bclk, ratio * bclk, group_size);
1821 	return;
1822 }
1823 
1824 static void
1825 dump_atom_turbo_ratio_limits(void)
1826 {
1827 	unsigned long long msr;
1828 	unsigned int ratio;
1829 
1830 	get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr);
1831 	fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);
1832 
1833 	ratio = (msr >> 0) & 0x3F;
1834 	if (ratio)
1835 		fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n",
1836 			ratio, bclk, ratio * bclk);
1837 
1838 	ratio = (msr >> 8) & 0x3F;
1839 	if (ratio)
1840 		fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n",
1841 			ratio, bclk, ratio * bclk);
1842 
1843 	ratio = (msr >> 16) & 0x3F;
1844 	if (ratio)
1845 		fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n",
1846 			ratio, bclk, ratio * bclk);
1847 
1848 	get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr);
1849 	fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);
1850 
1851 	ratio = (msr >> 24) & 0x3F;
1852 	if (ratio)
1853 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n",
1854 			ratio, bclk, ratio * bclk);
1855 
1856 	ratio = (msr >> 16) & 0x3F;
1857 	if (ratio)
1858 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n",
1859 			ratio, bclk, ratio * bclk);
1860 
1861 	ratio = (msr >> 8) & 0x3F;
1862 	if (ratio)
1863 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n",
1864 			ratio, bclk, ratio * bclk);
1865 
1866 	ratio = (msr >> 0) & 0x3F;
1867 	if (ratio)
1868 		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n",
1869 			ratio, bclk, ratio * bclk);
1870 }
1871 
1872 static void
1873 dump_knl_turbo_ratio_limits(void)
1874 {
1875 	const unsigned int buckets_no = 7;
1876 
1877 	unsigned long long msr;
1878 	int delta_cores, delta_ratio;
1879 	int i, b_nr;
1880 	unsigned int cores[buckets_no];
1881 	unsigned int ratio[buckets_no];
1882 
1883 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1884 
1885 	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n",
1886 		base_cpu, msr);
1887 
1888 	/**
1889 	 * Turbo encoding in KNL is as follows:
1890 	 * [0] -- Reserved
1891 	 * [7:1] -- Base value of number of active cores of bucket 1.
1892 	 * [15:8] -- Base value of freq ratio of bucket 1.
1893 	 * [20:16] -- +ve delta of number of active cores of bucket 2.
1894 	 * i.e. active cores of bucket 2 =
1895 	 * active cores of bucket 1 + delta
1896 	 * [23:21] -- Negative delta of freq ratio of bucket 2.
1897 	 * i.e. freq ratio of bucket 2 =
1898 	 * freq ratio of bucket 1 - delta
1899 	 * [28:24]-- +ve delta of number of active cores of bucket 3.
1900 	 * [31:29]-- -ve delta of freq ratio of bucket 3.
1901 	 * [36:32]-- +ve delta of number of active cores of bucket 4.
1902 	 * [39:37]-- -ve delta of freq ratio of bucket 4.
1903 	 * [44:40]-- +ve delta of number of active cores of bucket 5.
1904 	 * [47:45]-- -ve delta of freq ratio of bucket 5.
1905 	 * [52:48]-- +ve delta of number of active cores of bucket 6.
1906 	 * [55:53]-- -ve delta of freq ratio of bucket 6.
1907 	 * [60:56]-- +ve delta of number of active cores of bucket 7.
1908 	 * [63:61]-- -ve delta of freq ratio of bucket 7.
1909 	 */
1910 
1911 	b_nr = 0;
1912 	cores[b_nr] = (msr & 0xFF) >> 1;
1913 	ratio[b_nr] = (msr >> 8) & 0xFF;
1914 
1915 	for (i = 16; i < 64; i += 8) {
1916 		delta_cores = (msr >> i) & 0x1F;
1917 		delta_ratio = (msr >> (i + 5)) & 0x7;
1918 
1919 		cores[b_nr + 1] = cores[b_nr] + delta_cores;
1920 		ratio[b_nr + 1] = ratio[b_nr] - delta_ratio;
1921 		b_nr++;
1922 	}
1923 
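	/*
	 * Print buckets from the highest active-core count down, skipping
	 * any bucket whose ratio matches the preceding (smaller) bucket,
	 * so identical limits are reported only once.
	 */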
1924 	for (i = buckets_no - 1; i >= 0; i--)
1925 		if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
1926 			fprintf(outf,
1927 				"%d * %.1f = %.1f MHz max turbo %d active cores\n",
1928 				ratio[i], bclk, ratio[i] * bclk, cores[i]);
1929 }
1930 
1931 static void
1932 dump_nhm_cst_cfg(void)
1933 {
1934 	unsigned long long msr;
1935 
1936 	get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
1937 
1938 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
1939 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
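	/*
	 * bits 3:0 encode the deepest allowed package C-state,
	 * bit 15 is the CFG lock
	 */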
1940 
1941 	fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr);
1942 
1943 	fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
1944 		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
1945 		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
1946 		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
1947 		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
1948 		(msr & (1 << 15)) ? "" : "UN",
1949 		(unsigned int)msr & 0xF,
1950 		pkg_cstate_limit_strings[pkg_cstate_limit]);
1951 	return;
1952 }
1953 
1954 static void
1955 dump_config_tdp(void)
1956 {
1957 	unsigned long long msr;
1958 
1959 	get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
1960 	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
1961 	fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF);
1962 
1963 	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
1964 	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
1965 	if (msr) {
1966 		fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
1967 		fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
1968 		fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
1969 		fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF);
1970 	}
1971 	fprintf(outf, ")\n");
1972 
1973 	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
1974 	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
1975 	if (msr) {
1976 		fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
1977 		fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
1978 		fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
1979 		fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF);
1980 	}
1981 	fprintf(outf, ")\n");
1982 
1983 	get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
1984 	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
1985 	if ((msr) & 0x3)
1986 		fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
1987 	fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
1988 	fprintf(outf, ")\n");
1989 
1990 	get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
1991 	fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
1992 	fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF);
1993 	fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
1994 	fprintf(outf, ")\n");
1995 }
1996 
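/* nanoseconds per IRTL count, indexed by the MSR's time-unit field (bits 12:10) */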
1997 unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
1998 
1999 void print_irtl(void)
2000 {
2001 	unsigned long long msr;
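	/* each PKGCn_IRTL MSR: bits 9:0 time limit, bits 12:10 time-unit index, bit 15 valid */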
2002 
2003 	get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
2004 	fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
2005 	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x7]);
2007 
2008 	get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
2009 	fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
2010 	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x7]);
2012 
2013 	get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
2014 	fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
2015 	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x7]);
2017 
2018 	if (!do_irtl_hsw)
2019 		return;
2020 
2021 	get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
2022 	fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
2023 	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x7]);
2025 
2026 	get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
2027 	fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
2028 	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x7]);
2030 
2031 	get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
2032 	fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
2033 	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x7]);
2035 
2036 }
2037 void free_fd_percpu(void)
2038 {
2039 	int i;
2040 
2041 	for (i = 0; i < topo.max_cpu_num + 1; ++i) {
2042 		if (fd_percpu[i] != 0)
2043 			close(fd_percpu[i]);
2044 	}
2045 
2046 	free(fd_percpu);
2047 }
2048 
2049 void free_all_buffers(void)
2050 {
2051 	CPU_FREE(cpu_present_set);
2052 	cpu_present_set = NULL;
2053 	cpu_present_setsize = 0;
2054 
2055 	CPU_FREE(cpu_affinity_set);
2056 	cpu_affinity_set = NULL;
2057 	cpu_affinity_setsize = 0;
2058 
2059 	free(thread_even);
2060 	free(core_even);
2061 	free(package_even);
2062 
2063 	thread_even = NULL;
2064 	core_even = NULL;
2065 	package_even = NULL;
2066 
2067 	free(thread_odd);
2068 	free(core_odd);
2069 	free(package_odd);
2070 
2071 	thread_odd = NULL;
2072 	core_odd = NULL;
2073 	package_odd = NULL;
2074 
2075 	free(output_buffer);
2076 	output_buffer = NULL;
2077 	outp = NULL;
2078 
2079 	free_fd_percpu();
2080 
2081 	free(irq_column_2_cpu);
2082 	free(irqs_per_cpu);
2083 }
2084 
2085 
2086 /*
2087  * Parse a file containing a single int.
2088  */
2089 int parse_int_file(const char *fmt, ...)
2090 {
2091 	va_list args;
2092 	char path[PATH_MAX];
2093 	FILE *filep;
2094 	int value;
2095 
2096 	va_start(args, fmt);
2097 	vsnprintf(path, sizeof(path), fmt, args);
2098 	va_end(args);
2099 	filep = fopen_or_die(path, "r");
2100 	if (fscanf(filep, "%d", &value) != 1)
2101 		err(1, "%s: failed to parse number from file", path);
2102 	fclose(filep);
2103 	return value;
2104 }
2105 
2106 /*
2107  * get_cpu_position_in_core(cpu)
2108  * return the position of the CPU among its HT siblings in the core
 * return -1 if the sibling is not in the list
2110  */
2111 int get_cpu_position_in_core(int cpu)
2112 {
2113 	char path[64];
2114 	FILE *filep;
2115 	int this_cpu;
2116 	char character;
2117 	int i;
2118 
2119 	sprintf(path,
2120 		"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
2121 		cpu);
2122 	filep = fopen(path, "r");
2123 	if (filep == NULL) {
2124 		perror(path);
2125 		exit(1);
2126 	}
2127 
2128 	for (i = 0; i < topo.num_threads_per_core; i++) {
2129 		fscanf(filep, "%d", &this_cpu);
2130 		if (this_cpu == cpu) {
2131 			fclose(filep);
2132 			return i;
2133 		}
2134 
		/* Account for no separator after last thread */
2136 		if (i != (topo.num_threads_per_core - 1))
2137 			fscanf(filep, "%c", &character);
2138 	}
2139 
2140 	fclose(filep);
2141 	return -1;
2142 }
2143 
2144 /*
2145  * cpu_is_first_core_in_package(cpu)
2146  * return 1 if given CPU is 1st core in package
2147  */
2148 int cpu_is_first_core_in_package(int cpu)
2149 {
2150 	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
2151 }
2152 
2153 int get_physical_package_id(int cpu)
2154 {
2155 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
2156 }
2157 
2158 int get_core_id(int cpu)
2159 {
2160 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
2161 }
2162 
2163 int get_num_ht_siblings(int cpu)
2164 {
2165 	char path[80];
2166 	FILE *filep;
2167 	int sib1;
2168 	int matches = 0;
2169 	char character;
2170 	char str[100];
2171 	char *ch;
2172 
2173 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
2174 	filep = fopen_or_die(path, "r");
2175 
2176 	/*
2177 	 * file format:
2178 	 * A ',' separated or '-' separated set of numbers
2179 	 * (eg 1-2 or 1,3,4,5)
2180 	 */
2181 	fscanf(filep, "%d%c\n", &sib1, &character);
2182 	fseek(filep, 0, SEEK_SET);
2183 	fgets(str, 100, filep);
2184 	ch = strchr(str, character);
2185 	while (ch != NULL) {
2186 		matches++;
2187 		ch = strchr(ch+1, character);
2188 	}
2189 
2190 	fclose(filep);
2191 	return matches+1;
2192 }
2193 
2194 /*
2195  * run func(thread, core, package) in topology order
2196  * skip non-present cpus
2197  */
2198 
2199 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
2200 	struct pkg_data *, struct thread_data *, struct core_data *,
2201 	struct pkg_data *), struct thread_data *thread_base,
2202 	struct core_data *core_base, struct pkg_data *pkg_base,
2203 	struct thread_data *thread_base2, struct core_data *core_base2,
2204 	struct pkg_data *pkg_base2)
2205 {
2206 	int retval, pkg_no, core_no, thread_no;
2207 
2208 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
2209 		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
2210 			for (thread_no = 0; thread_no <
2211 				topo.num_threads_per_core; ++thread_no) {
2212 				struct thread_data *t, *t2;
2213 				struct core_data *c, *c2;
2214 				struct pkg_data *p, *p2;
2215 
2216 				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
2217 
2218 				if (cpu_is_not_present(t->cpu_id))
2219 					continue;
2220 
2221 				t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
2222 
2223 				c = GET_CORE(core_base, core_no, pkg_no);
2224 				c2 = GET_CORE(core_base2, core_no, pkg_no);
2225 
2226 				p = GET_PKG(pkg_base, pkg_no);
2227 				p2 = GET_PKG(pkg_base2, pkg_no);
2228 
2229 				retval = func(t, c, p, t2, c2, p2);
2230 				if (retval)
2231 					return retval;
2232 			}
2233 		}
2234 	}
2235 	return 0;
2236 }
2237 
2238 /*
2239  * run func(cpu) on every cpu in /proc/stat
2240  * return max_cpu number
2241  */
2242 int for_all_proc_cpus(int (func)(int))
2243 {
2244 	FILE *fp;
2245 	int cpu_num;
2246 	int retval;
2247 
2248 	fp = fopen_or_die(proc_stat, "r");
2249 
2250 	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
2251 	if (retval != 0)
2252 		err(1, "%s: failed to parse format", proc_stat);
2253 
2254 	while (1) {
2255 		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
2256 		if (retval != 1)
2257 			break;
2258 
2259 		retval = func(cpu_num);
2260 		if (retval) {
2261 			fclose(fp);
			return retval;
2263 		}
2264 	}
2265 	fclose(fp);
2266 	return 0;
2267 }
2268 
2269 void re_initialize(void)
2270 {
2271 	free_all_buffers();
2272 	setup_all_buffers();
2273 	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
2274 }
2275 
2276 
2277 /*
2278  * count_cpus()
2279  * remember the last one seen, it will be the max
2280  */
2281 int count_cpus(int cpu)
2282 {
2283 	if (topo.max_cpu_num < cpu)
2284 		topo.max_cpu_num = cpu;
2285 
2286 	topo.num_cpus += 1;
2287 	return 0;
2288 }
2289 int mark_cpu_present(int cpu)
2290 {
2291 	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
2292 	return 0;
2293 }
2294 
2295 /*
2296  * snapshot_proc_interrupts()
2297  *
2298  * read and record summary of /proc/interrupts
2299  *
2300  * return 1 if config change requires a restart, else return 0
2301  */
2302 int snapshot_proc_interrupts(void)
2303 {
2304 	static FILE *fp;
2305 	int column, retval;
2306 
2307 	if (fp == NULL)
2308 		fp = fopen_or_die("/proc/interrupts", "r");
2309 	else
2310 		rewind(fp);
2311 
2312 	/* read 1st line of /proc/interrupts to get cpu* name for each column */
2313 	for (column = 0; column < topo.num_cpus; ++column) {
2314 		int cpu_number;
2315 
2316 		retval = fscanf(fp, " CPU%d", &cpu_number);
2317 		if (retval != 1)
2318 			break;
2319 
2320 		if (cpu_number > topo.max_cpu_num) {
2321 			warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num);
2322 			return 1;
2323 		}
2324 
2325 		irq_column_2_cpu[column] = cpu_number;
2326 		irqs_per_cpu[cpu_number] = 0;
2327 	}
2328 
2329 	/* read /proc/interrupt count lines and sum up irqs per cpu */
2330 	while (1) {
2331 		int column;
2332 		char buf[64];
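		/*
		 * A count line looks like, e.g.:
		 * " 28:  1234  5678  IR-PCI-MSI  eth0"
		 * flush the label, sum the per-cpu counts, discard the rest.
		 */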
2333 
2334 		retval = fscanf(fp, " %s:", buf);	/* flush irq# "N:" */
2335 		if (retval != 1)
2336 			break;
2337 
2338 		/* read the count per cpu */
2339 		for (column = 0; column < topo.num_cpus; ++column) {
2340 
2341 			int cpu_number, irq_count;
2342 
2343 			retval = fscanf(fp, " %d", &irq_count);
2344 			if (retval != 1)
2345 				break;
2346 
2347 			cpu_number = irq_column_2_cpu[column];
2348 			irqs_per_cpu[cpu_number] += irq_count;
2349 
2350 		}
2351 
2352 		while (getc(fp) != '\n')
2353 			;	/* flush interrupt description */
2354 
2355 	}
2356 	return 0;
2357 }
2358 /*
2359  * snapshot_gfx_rc6_ms()
2360  *
2361  * record snapshot of
2362  * /sys/class/drm/card0/power/rc6_residency_ms
2363  *
2364  * return 1 if config change requires a restart, else return 0
2365  */
2366 int snapshot_gfx_rc6_ms(void)
2367 {
2368 	FILE *fp;
2369 	int retval;
2370 
2371 	fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r");
2372 
2373 	retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms);
2374 	if (retval != 1)
2375 		err(1, "GFX rc6");
2376 
2377 	fclose(fp);
2378 
2379 	return 0;
2380 }
2381 /*
2382  * snapshot_gfx_mhz()
2383  *
2384  * record snapshot of
2385  * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz
2386  *
2387  * return 1 if config change requires a restart, else return 0
2388  */
2389 int snapshot_gfx_mhz(void)
2390 {
2391 	static FILE *fp;
2392 	int retval;
2393 
2394 	if (fp == NULL)
2395 		fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
2396 	else
2397 		rewind(fp);
2398 
2399 	retval = fscanf(fp, "%d", &gfx_cur_mhz);
2400 	if (retval != 1)
2401 		err(1, "GFX MHz");
2402 
2403 	return 0;
2404 }
2405 
2406 /*
2407  * snapshot /proc and /sys files
2408  *
2409  * return 1 if configuration restart needed, else return 0
2410  */
2411 int snapshot_proc_sysfs_files(void)
2412 {
2413 	if (snapshot_proc_interrupts())
2414 		return 1;
2415 
2416 	if (DO_BIC(BIC_GFX_rc6))
2417 		snapshot_gfx_rc6_ms();
2418 
2419 	if (DO_BIC(BIC_GFXMHz))
2420 		snapshot_gfx_mhz();
2421 
2422 	return 0;
2423 }
2424 
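/*
 * turbostat_loop()
 * Counters are gathered alternately into the EVEN and ODD buffer sets;
 * each half of the loop diffs the newest snapshot against the previous
 * one, prints the interval, and the roles swap for the next interval.
 */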
2425 void turbostat_loop()
2426 {
2427 	int retval;
2428 	int restarted = 0;
2429 
2430 restart:
2431 	restarted++;
2432 
2433 	snapshot_proc_sysfs_files();
2434 	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
2435 	if (retval < -1) {
2436 		exit(retval);
2437 	} else if (retval == -1) {
2438 		if (restarted > 1) {
2439 			exit(retval);
2440 		}
2441 		re_initialize();
2442 		goto restart;
2443 	}
2444 	restarted = 0;
2445 	gettimeofday(&tv_even, (struct timezone *)NULL);
2446 
2447 	while (1) {
2448 		if (for_all_proc_cpus(cpu_is_not_present)) {
2449 			re_initialize();
2450 			goto restart;
2451 		}
2452 		nanosleep(&interval_ts, NULL);
2453 		if (snapshot_proc_sysfs_files())
2454 			goto restart;
2455 		retval = for_all_cpus(get_counters, ODD_COUNTERS);
2456 		if (retval < -1) {
2457 			exit(retval);
2458 		} else if (retval == -1) {
2459 			re_initialize();
2460 			goto restart;
2461 		}
2462 		gettimeofday(&tv_odd, (struct timezone *)NULL);
2463 		timersub(&tv_odd, &tv_even, &tv_delta);
2464 		if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) {
2465 			re_initialize();
2466 			goto restart;
2467 		}
2468 		compute_average(EVEN_COUNTERS);
2469 		format_all_counters(EVEN_COUNTERS);
2470 		flush_output_stdout();
2471 		nanosleep(&interval_ts, NULL);
2472 		if (snapshot_proc_sysfs_files())
2473 			goto restart;
2474 		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
2475 		if (retval < -1) {
2476 			exit(retval);
2477 		} else if (retval == -1) {
2478 			re_initialize();
2479 			goto restart;
2480 		}
2481 		gettimeofday(&tv_even, (struct timezone *)NULL);
2482 		timersub(&tv_even, &tv_odd, &tv_delta);
2483 		if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) {
2484 			re_initialize();
2485 			goto restart;
2486 		}
2487 		compute_average(ODD_COUNTERS);
2488 		format_all_counters(ODD_COUNTERS);
2489 		flush_output_stdout();
2490 	}
2491 }
2492 
2493 void check_dev_msr()
2494 {
2495 	struct stat sb;
2496 	char pathname[32];
2497 
2498 	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
	if (stat(pathname, &sb))
		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
			err(-5, "no %s, try \"# modprobe msr\"", pathname);
2502 }
2503 
2504 void check_permissions()
2505 {
2506 	struct __user_cap_header_struct cap_header_data;
2507 	cap_user_header_t cap_header = &cap_header_data;
2508 	struct __user_cap_data_struct cap_data_data;
2509 	cap_user_data_t cap_data = &cap_data_data;
2510 	extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
2511 	int do_exit = 0;
2512 	char pathname[32];
2513 
2514 	/* check for CAP_SYS_RAWIO */
2515 	cap_header->pid = getpid();
2516 	cap_header->version = _LINUX_CAPABILITY_VERSION;
2517 	if (capget(cap_header, cap_data) < 0)
2518 		err(-6, "capget(2) failed");
2519 
2520 	if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
2521 		do_exit++;
2522 		warnx("capget(CAP_SYS_RAWIO) failed,"
2523 			" try \"# setcap cap_sys_rawio=ep %s\"", progname);
2524 	}
2525 
2526 	/* test file permissions */
2527 	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
2528 	if (euidaccess(pathname, R_OK)) {
2529 		do_exit++;
2530 		warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
2531 	}
2532 
	/* if all else fails, tell them to be root */
2534 	if (do_exit)
2535 		if (getuid() != 0)
2536 			warnx("... or simply run as root");
2537 
2538 	if (do_exit)
2539 		exit(-6);
2540 }
2541 
2542 /*
2543  * NHM adds support for additional MSRs:
2544  *
2545  * MSR_SMI_COUNT                   0x00000034
2546  *
2547  * MSR_PLATFORM_INFO               0x000000ce
 * MSR_PKG_CST_CONFIG_CONTROL      0x000000e2
2549  *
2550  * MSR_MISC_PWR_MGMT               0x000001aa
2551  *
2552  * MSR_PKG_C3_RESIDENCY            0x000003f8
2553  * MSR_PKG_C6_RESIDENCY            0x000003f9
2554  * MSR_CORE_C3_RESIDENCY           0x000003fc
2555  * MSR_CORE_C6_RESIDENCY           0x000003fd
2556  *
2557  * Side effect:
2558  * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL
2559  * sets has_misc_feature_control
2560  */
2561 int probe_nhm_msrs(unsigned int family, unsigned int model)
2562 {
2563 	unsigned long long msr;
2564 	unsigned int base_ratio;
2565 	int *pkg_cstate_limits;
2566 
2567 	if (!genuine_intel)
2568 		return 0;
2569 
2570 	if (family != 6)
2571 		return 0;
2572 
2573 	bclk = discover_bclk(family, model);
2574 
2575 	switch (model) {
	case INTEL_FAM6_NEHALEM_EP:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
2577 	case INTEL_FAM6_NEHALEM:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
2578 	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
2579 	case INTEL_FAM6_WESTMERE:	/* Westmere Client - Clarkdale, Arrandale */
2580 	case INTEL_FAM6_WESTMERE_EP:	/* Westmere EP - Gulftown */
2581 	case INTEL_FAM6_NEHALEM_EX:	/* Nehalem-EX Xeon - Beckton */
2582 	case INTEL_FAM6_WESTMERE_EX:	/* Westmere-EX Xeon - Eagleton */
2583 		pkg_cstate_limits = nhm_pkg_cstate_limits;
2584 		break;
2585 	case INTEL_FAM6_SANDYBRIDGE:	/* SNB */
2586 	case INTEL_FAM6_SANDYBRIDGE_X:	/* SNB Xeon */
2587 	case INTEL_FAM6_IVYBRIDGE:	/* IVB */
2588 	case INTEL_FAM6_IVYBRIDGE_X:	/* IVB Xeon */
2589 		pkg_cstate_limits = snb_pkg_cstate_limits;
2590 		has_misc_feature_control = 1;
2591 		break;
2592 	case INTEL_FAM6_HASWELL_CORE:	/* HSW */
2593 	case INTEL_FAM6_HASWELL_X:	/* HSX */
2594 	case INTEL_FAM6_HASWELL_ULT:	/* HSW */
2595 	case INTEL_FAM6_HASWELL_GT3E:	/* HSW */
2596 	case INTEL_FAM6_BROADWELL_CORE:	/* BDW */
2597 	case INTEL_FAM6_BROADWELL_GT3E:	/* BDW */
2598 	case INTEL_FAM6_BROADWELL_X:	/* BDX */
2599 	case INTEL_FAM6_BROADWELL_XEON_D:	/* BDX-DE */
2600 	case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
2601 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
2602 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
2603 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
2604 		pkg_cstate_limits = hsw_pkg_cstate_limits;
2605 		has_misc_feature_control = 1;
2606 		break;
2607 	case INTEL_FAM6_SKYLAKE_X:	/* SKX */
2608 		pkg_cstate_limits = skx_pkg_cstate_limits;
2609 		has_misc_feature_control = 1;
2610 		break;
2611 	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
2612 		no_MSR_MISC_PWR_MGMT = 1;
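		/* FALLTHRU */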
2613 	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
2614 		pkg_cstate_limits = slv_pkg_cstate_limits;
2615 		break;
2616 	case INTEL_FAM6_ATOM_AIRMONT:	/* AMT */
2617 		pkg_cstate_limits = amt_pkg_cstate_limits;
2618 		no_MSR_MISC_PWR_MGMT = 1;
2619 		break;
2620 	case INTEL_FAM6_XEON_PHI_KNL:	/* PHI */
2621 	case INTEL_FAM6_XEON_PHI_KNM:
2622 		pkg_cstate_limits = phi_pkg_cstate_limits;
2623 		break;
2624 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
2625 	case INTEL_FAM6_ATOM_GEMINI_LAKE:
2626 	case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
2627 		pkg_cstate_limits = bxt_pkg_cstate_limits;
2628 		break;
2629 	default:
2630 		return 0;
2631 	}
2632 	get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
2633 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
2634 
2635 	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
2636 	base_ratio = (msr >> 8) & 0xFF;
2637 
2638 	base_hz = base_ratio * bclk * 1000000;
2639 	has_base_hz = 1;
2640 	return 1;
2641 }
2642 /*
2643  * SLV client has support for unique MSRs:
2644  *
2645  * MSR_CC6_DEMOTION_POLICY_CONFIG
2646  * MSR_MC6_DEMOTION_POLICY_CONFIG
2647  */
2648 
2649 int has_slv_msrs(unsigned int family, unsigned int model)
2650 {
2651 	if (!genuine_intel)
2652 		return 0;
2653 
2654 	switch (model) {
2655 	case INTEL_FAM6_ATOM_SILVERMONT1:
2656 	case INTEL_FAM6_ATOM_MERRIFIELD:
2657 	case INTEL_FAM6_ATOM_MOOREFIELD:
2658 		return 1;
2659 	}
2660 	return 0;
2661 }
2662 int is_dnv(unsigned int family, unsigned int model)
2663 {
2664 
2665 	if (!genuine_intel)
2666 		return 0;
2667 
2668 	switch (model) {
2669 	case INTEL_FAM6_ATOM_DENVERTON:
2670 		return 1;
2671 	}
2672 	return 0;
2673 }
2674 int is_bdx(unsigned int family, unsigned int model)
2675 {
2676 
2677 	if (!genuine_intel)
2678 		return 0;
2679 
2680 	switch (model) {
2681 	case INTEL_FAM6_BROADWELL_X:
2682 	case INTEL_FAM6_BROADWELL_XEON_D:
2683 		return 1;
2684 	}
2685 	return 0;
2686 }
2687 int is_skx(unsigned int family, unsigned int model)
2688 {
2689 
2690 	if (!genuine_intel)
2691 		return 0;
2692 
2693 	switch (model) {
2694 	case INTEL_FAM6_SKYLAKE_X:
2695 		return 1;
2696 	}
2697 	return 0;
2698 }
2699 
2700 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
2701 {
2702 	if (has_slv_msrs(family, model))
2703 		return 0;
2704 
2705 	switch (model) {
2706 	/* Nehalem compatible, but do not include turbo-ratio limit support */
2707 	case INTEL_FAM6_NEHALEM_EX:	/* Nehalem-EX Xeon - Beckton */
2708 	case INTEL_FAM6_WESTMERE_EX:	/* Westmere-EX Xeon - Eagleton */
2709 	case INTEL_FAM6_XEON_PHI_KNL:	/* PHI - Knights Landing (different MSR definition) */
2710 	case INTEL_FAM6_XEON_PHI_KNM:
2711 		return 0;
2712 	default:
2713 		return 1;
2714 	}
2715 }
2716 int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model)
2717 {
2718 	if (has_slv_msrs(family, model))
2719 		return 1;
2720 
2721 	return 0;
2722 }
2723 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
2724 {
2725 	if (!genuine_intel)
2726 		return 0;
2727 
2728 	if (family != 6)
2729 		return 0;
2730 
2731 	switch (model) {
2732 	case INTEL_FAM6_IVYBRIDGE_X:	/* IVB Xeon */
2733 	case INTEL_FAM6_HASWELL_X:	/* HSW Xeon */
2734 		return 1;
2735 	default:
2736 		return 0;
2737 	}
2738 }
2739 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
2740 {
2741 	if (!genuine_intel)
2742 		return 0;
2743 
2744 	if (family != 6)
2745 		return 0;
2746 
2747 	switch (model) {
2748 	case INTEL_FAM6_HASWELL_X:	/* HSW Xeon */
2749 		return 1;
2750 	default:
2751 		return 0;
2752 	}
2753 }
2754 
2755 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
2756 {
2757 	if (!genuine_intel)
2758 		return 0;
2759 
2760 	if (family != 6)
2761 		return 0;
2762 
2763 	switch (model) {
2764 	case INTEL_FAM6_XEON_PHI_KNL:	/* Knights Landing */
2765 	case INTEL_FAM6_XEON_PHI_KNM:
2766 		return 1;
2767 	default:
2768 		return 0;
2769 	}
2770 }
2771 int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model)
2772 {
2773 	if (!genuine_intel)
2774 		return 0;
2775 
2776 	if (family != 6)
2777 		return 0;
2778 
2779 	switch (model) {
2780 	case INTEL_FAM6_ATOM_GOLDMONT:
2781 	case INTEL_FAM6_SKYLAKE_X:
2782 		return 1;
2783 	default:
2784 		return 0;
2785 	}
2786 }
2787 int has_config_tdp(unsigned int family, unsigned int model)
2788 {
2789 	if (!genuine_intel)
2790 		return 0;
2791 
2792 	if (family != 6)
2793 		return 0;
2794 
2795 	switch (model) {
2796 	case INTEL_FAM6_IVYBRIDGE:	/* IVB */
2797 	case INTEL_FAM6_HASWELL_CORE:	/* HSW */
2798 	case INTEL_FAM6_HASWELL_X:	/* HSX */
2799 	case INTEL_FAM6_HASWELL_ULT:	/* HSW */
2800 	case INTEL_FAM6_HASWELL_GT3E:	/* HSW */
2801 	case INTEL_FAM6_BROADWELL_CORE:	/* BDW */
2802 	case INTEL_FAM6_BROADWELL_GT3E:	/* BDW */
2803 	case INTEL_FAM6_BROADWELL_X:	/* BDX */
2804 	case INTEL_FAM6_BROADWELL_XEON_D:	/* BDX-DE */
2805 	case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
2806 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
2807 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
2808 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
2809 	case INTEL_FAM6_SKYLAKE_X:	/* SKX */
2810 
2811 	case INTEL_FAM6_XEON_PHI_KNL:	/* Knights Landing */
2812 	case INTEL_FAM6_XEON_PHI_KNM:
2813 		return 1;
2814 	default:
2815 		return 0;
2816 	}
2817 }
2818 
2819 static void
2820 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
2821 {
2822 	if (!do_nhm_platform_info)
2823 		return;
2824 
2825 	dump_nhm_platform_info();
2826 
2827 	if (has_hsw_turbo_ratio_limit(family, model))
2828 		dump_hsw_turbo_ratio_limits();
2829 
2830 	if (has_ivt_turbo_ratio_limit(family, model))
2831 		dump_ivt_turbo_ratio_limits();
2832 
2833 	if (has_turbo_ratio_limit(family, model))
2834 		dump_turbo_ratio_limits(family, model);
2835 
2836 	if (has_atom_turbo_ratio_limit(family, model))
2837 		dump_atom_turbo_ratio_limits();
2838 
2839 	if (has_knl_turbo_ratio_limit(family, model))
2840 		dump_knl_turbo_ratio_limits();
2841 
2842 	if (has_config_tdp(family, model))
2843 		dump_config_tdp();
2844 
2845 	dump_nhm_cst_cfg();
2846 }
2847 
2848 static void
2849 dump_sysfs_cstate_config(void)
2850 {
2851 	char path[64];
2852 	char name_buf[16];
2853 	char desc[64];
2854 	FILE *input;
2855 	int state;
2856 	char *sp;
2857 
2858 	if (!DO_BIC(BIC_sysfs))
2859 		return;
2860 
2861 	for (state = 0; state < 10; ++state) {
2862 
2863 		sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
2864 			base_cpu, state);
2865 		input = fopen(path, "r");
2866 		if (input == NULL)
2867 			continue;
2868 		fgets(name_buf, sizeof(name_buf), input);
2869 
2870 		 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
2871 		sp = strchr(name_buf, '-');
2872 		if (!sp)
2873 			sp = strchrnul(name_buf, '\n');
2874 		*sp = '\0';
2875 
2876 		fclose(input);
2877 
2878 		sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
2879 			base_cpu, state);
2880 		input = fopen(path, "r");
2881 		if (input == NULL)
2882 			continue;
2883 		fgets(desc, sizeof(desc), input);
2884 
2885 		fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
2886 		fclose(input);
2887 	}
2888 }
2889 
2890 
2891 /*
2892  * print_epb()
2893  * Decode the ENERGY_PERF_BIAS MSR
2894  */
2895 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2896 {
2897 	unsigned long long msr;
2898 	char *epb_string;
2899 	int cpu;
2900 
2901 	if (!has_epb)
2902 		return 0;
2903 
2904 	cpu = t->cpu_id;
2905 
2906 	/* EPB is per-package */
2907 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2908 		return 0;
2909 
2910 	if (cpu_migrate(cpu)) {
2911 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
2912 		return -1;
2913 	}
2914 
2915 	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
2916 		return 0;
2917 
2918 	switch (msr & 0xF) {
2919 	case ENERGY_PERF_BIAS_PERFORMANCE:
2920 		epb_string = "performance";
2921 		break;
2922 	case ENERGY_PERF_BIAS_NORMAL:
2923 		epb_string = "balanced";
2924 		break;
2925 	case ENERGY_PERF_BIAS_POWERSAVE:
2926 		epb_string = "powersave";
2927 		break;
2928 	default:
2929 		epb_string = "custom";
2930 		break;
2931 	}
2932 	fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
2933 
2934 	return 0;
2935 }
2936 /*
2937  * print_hwp()
2938  * Decode the MSR_HWP_CAPABILITIES
2939  */
2940 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2941 {
2942 	unsigned long long msr;
2943 	int cpu;
2944 
2945 	if (!has_hwp)
2946 		return 0;
2947 
2948 	cpu = t->cpu_id;
2949 
2950 	/* MSR_HWP_CAPABILITIES is per-package */
2951 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2952 		return 0;
2953 
2954 	if (cpu_migrate(cpu)) {
2955 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
2956 		return -1;
2957 	}
2958 
2959 	if (get_msr(cpu, MSR_PM_ENABLE, &msr))
2960 		return 0;
2961 
2962 	fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n",
2963 		cpu, msr, (msr & (1 << 0)) ? "" : "No-");
2964 
	/* MSR_PM_ENABLE[0] == 1 if HWP is enabled and its MSRs are visible */
2966 	if ((msr & (1 << 0)) == 0)
2967 		return 0;
2968 
2969 	if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
2970 		return 0;
2971 
2972 	fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
2973 			"(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
2974 			cpu, msr,
2975 			(unsigned int)HWP_HIGHEST_PERF(msr),
2976 			(unsigned int)HWP_GUARANTEED_PERF(msr),
2977 			(unsigned int)HWP_MOSTEFFICIENT_PERF(msr),
2978 			(unsigned int)HWP_LOWEST_PERF(msr));
2979 
2980 	if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
2981 		return 0;
2982 
2983 	fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
2984 			"(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
2985 			cpu, msr,
2986 			(unsigned int)(((msr) >> 0) & 0xff),
2987 			(unsigned int)(((msr) >> 8) & 0xff),
2988 			(unsigned int)(((msr) >> 16) & 0xff),
2989 			(unsigned int)(((msr) >> 24) & 0xff),
			(unsigned int)(((msr) >> 32) & 0x3ff),
2991 			(unsigned int)(((msr) >> 42) & 0x1));
2992 
2993 	if (has_hwp_pkg) {
2994 		if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
2995 			return 0;
2996 
2997 		fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
2998 			"(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
2999 			cpu, msr,
3000 			(unsigned int)(((msr) >> 0) & 0xff),
3001 			(unsigned int)(((msr) >> 8) & 0xff),
3002 			(unsigned int)(((msr) >> 16) & 0xff),
3003 			(unsigned int)(((msr) >> 24) & 0xff),
			(unsigned int)(((msr) >> 32) & 0x3ff));
3005 	}
3006 	if (has_hwp_notify) {
3007 		if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
3008 			return 0;
3009 
3010 		fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx "
3011 			"(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n",
3012 			cpu, msr,
3013 			((msr) & 0x1) ? "EN" : "Dis",
3014 			((msr) & 0x2) ? "EN" : "Dis");
3015 	}
3016 	if (get_msr(cpu, MSR_HWP_STATUS, &msr))
3017 		return 0;
3018 
3019 	fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
3020 			"(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
3021 			cpu, msr,
3022 			((msr) & 0x1) ? "" : "No-",
3023 			((msr) & 0x2) ? "" : "No-");
3024 
3025 	return 0;
3026 }
3027 
3028 /*
3029  * print_perf_limit()
3030  */
3031 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3032 {
3033 	unsigned long long msr;
3034 	int cpu;
3035 
3036 	cpu = t->cpu_id;
3037 
3038 	/* per-package */
3039 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3040 		return 0;
3041 
3042 	if (cpu_migrate(cpu)) {
3043 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3044 		return -1;
3045 	}
3046 
3047 	if (do_core_perf_limit_reasons) {
3048 		get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
3049 		fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3050 		fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
3051 			(msr & 1 << 15) ? "bit15, " : "",
3052 			(msr & 1 << 14) ? "bit14, " : "",
3053 			(msr & 1 << 13) ? "Transitions, " : "",
3054 			(msr & 1 << 12) ? "MultiCoreTurbo, " : "",
3055 			(msr & 1 << 11) ? "PkgPwrL2, " : "",
3056 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
3057 			(msr & 1 << 9) ? "CorePwr, " : "",
3058 			(msr & 1 << 8) ? "Amps, " : "",
3059 			(msr & 1 << 6) ? "VR-Therm, " : "",
3060 			(msr & 1 << 5) ? "Auto-HWP, " : "",
3061 			(msr & 1 << 4) ? "Graphics, " : "",
3062 			(msr & 1 << 2) ? "bit2, " : "",
3063 			(msr & 1 << 1) ? "ThermStatus, " : "",
3064 			(msr & 1 << 0) ? "PROCHOT, " : "");
3065 		fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
3066 			(msr & 1 << 31) ? "bit31, " : "",
3067 			(msr & 1 << 30) ? "bit30, " : "",
3068 			(msr & 1 << 29) ? "Transitions, " : "",
3069 			(msr & 1 << 28) ? "MultiCoreTurbo, " : "",
3070 			(msr & 1 << 27) ? "PkgPwrL2, " : "",
3071 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
3072 			(msr & 1 << 25) ? "CorePwr, " : "",
3073 			(msr & 1 << 24) ? "Amps, " : "",
3074 			(msr & 1 << 22) ? "VR-Therm, " : "",
3075 			(msr & 1 << 21) ? "Auto-HWP, " : "",
3076 			(msr & 1 << 20) ? "Graphics, " : "",
3077 			(msr & 1 << 18) ? "bit18, " : "",
3078 			(msr & 1 << 17) ? "ThermStatus, " : "",
3079 			(msr & 1 << 16) ? "PROCHOT, " : "");
3080 
3081 	}
3082 	if (do_gfx_perf_limit_reasons) {
3083 		get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
3084 		fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3085 		fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)",
3086 			(msr & 1 << 0) ? "PROCHOT, " : "",
3087 			(msr & 1 << 1) ? "ThermStatus, " : "",
3088 			(msr & 1 << 4) ? "Graphics, " : "",
3089 			(msr & 1 << 6) ? "VR-Therm, " : "",
3090 			(msr & 1 << 8) ? "Amps, " : "",
3091 			(msr & 1 << 9) ? "GFXPwr, " : "",
3092 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
3093 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
3094 		fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n",
3095 			(msr & 1 << 16) ? "PROCHOT, " : "",
3096 			(msr & 1 << 17) ? "ThermStatus, " : "",
3097 			(msr & 1 << 20) ? "Graphics, " : "",
3098 			(msr & 1 << 22) ? "VR-Therm, " : "",
3099 			(msr & 1 << 24) ? "Amps, " : "",
3100 			(msr & 1 << 25) ? "GFXPwr, " : "",
3101 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
3102 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
3103 	}
3104 	if (do_ring_perf_limit_reasons) {
3105 		get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
3106 		fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3107 		fprintf(outf, " (Active: %s%s%s%s%s%s)",
3108 			(msr & 1 << 0) ? "PROCHOT, " : "",
3109 			(msr & 1 << 1) ? "ThermStatus, " : "",
3110 			(msr & 1 << 6) ? "VR-Therm, " : "",
3111 			(msr & 1 << 8) ? "Amps, " : "",
3112 			(msr & 1 << 10) ? "PkgPwrL1, " : "",
3113 			(msr & 1 << 11) ? "PkgPwrL2, " : "");
3114 		fprintf(outf, " (Logged: %s%s%s%s%s%s)\n",
3115 			(msr & 1 << 16) ? "PROCHOT, " : "",
3116 			(msr & 1 << 17) ? "ThermStatus, " : "",
3117 			(msr & 1 << 22) ? "VR-Therm, " : "",
3118 			(msr & 1 << 24) ? "Amps, " : "",
3119 			(msr & 1 << 26) ? "PkgPwrL1, " : "",
3120 			(msr & 1 << 27) ? "PkgPwrL2, " : "");
3121 	}
3122 	return 0;
3123 }
3124 
3125 #define	RAPL_POWER_GRANULARITY	0x7FFF	/* 15 bit power granularity */
3126 #define	RAPL_TIME_GRANULARITY	0x3F /* 6 bit time granularity */
3127 
3128 double get_tdp(unsigned int model)
3129 {
3130 	unsigned long long msr;
3131 
3132 	if (do_rapl & RAPL_PKG_POWER_INFO)
3133 		if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
3134 			return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
3135 
3136 	switch (model) {
3137 	case INTEL_FAM6_ATOM_SILVERMONT1:
3138 	case INTEL_FAM6_ATOM_SILVERMONT2:
3139 		return 30.0;
3140 	default:
3141 		return 135.0;
3142 	}
3143 }
3144 
3145 /*
3146  * rapl_dram_energy_units_probe()
3147  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
3148  */
3149 static double
3150 rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
3151 {
3152 	/* only called for genuine_intel, family 6 */
3153 
3154 	switch (model) {
3155 	case INTEL_FAM6_HASWELL_X:	/* HSX */
3156 	case INTEL_FAM6_BROADWELL_X:	/* BDX */
3157 	case INTEL_FAM6_BROADWELL_XEON_D:	/* BDX-DE */
3158 	case INTEL_FAM6_XEON_PHI_KNL:	/* KNL */
3159 	case INTEL_FAM6_XEON_PHI_KNM:
3160 		return (rapl_dram_energy_units = 15.3 / 1000000);
3161 	default:
3162 		return (rapl_energy_units);
3163 	}
3164 }
3165 
3166 
3167 /*
3168  * rapl_probe()
3169  *
3170  * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
3171  */
3172 void rapl_probe(unsigned int family, unsigned int model)
3173 {
3174 	unsigned long long msr;
3175 	unsigned int time_unit;
3176 	double tdp;
3177 
3178 	if (!genuine_intel)
3179 		return;
3180 
3181 	if (family != 6)
3182 		return;
3183 
3184 	switch (model) {
3185 	case INTEL_FAM6_SANDYBRIDGE:
3186 	case INTEL_FAM6_IVYBRIDGE:
3187 	case INTEL_FAM6_HASWELL_CORE:	/* HSW */
3188 	case INTEL_FAM6_HASWELL_ULT:	/* HSW */
3189 	case INTEL_FAM6_HASWELL_GT3E:	/* HSW */
3190 	case INTEL_FAM6_BROADWELL_CORE:	/* BDW */
3191 	case INTEL_FAM6_BROADWELL_GT3E:	/* BDW */
3192 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
3193 		if (rapl_joules) {
3194 			BIC_PRESENT(BIC_Pkg_J);
3195 			BIC_PRESENT(BIC_Cor_J);
3196 			BIC_PRESENT(BIC_GFX_J);
3197 		} else {
3198 			BIC_PRESENT(BIC_PkgWatt);
3199 			BIC_PRESENT(BIC_CorWatt);
3200 			BIC_PRESENT(BIC_GFXWatt);
3201 		}
3202 		break;
3203 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
3204 	case INTEL_FAM6_ATOM_GEMINI_LAKE:
3205 		do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
3206 		if (rapl_joules)
3207 			BIC_PRESENT(BIC_Pkg_J);
3208 		else
3209 			BIC_PRESENT(BIC_PkgWatt);
3210 		break;
3211 	case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
3212 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
3213 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
3214 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
3215 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
3216 		BIC_PRESENT(BIC_PKG__);
3217 		BIC_PRESENT(BIC_RAM__);
3218 		if (rapl_joules) {
3219 			BIC_PRESENT(BIC_Pkg_J);
3220 			BIC_PRESENT(BIC_Cor_J);
3221 			BIC_PRESENT(BIC_RAM_J);
3222 		} else {
3223 			BIC_PRESENT(BIC_PkgWatt);
3224 			BIC_PRESENT(BIC_CorWatt);
3225 			BIC_PRESENT(BIC_RAMWatt);
3226 		}
3227 		break;
3228 	case INTEL_FAM6_HASWELL_X:	/* HSX */
3229 	case INTEL_FAM6_BROADWELL_X:	/* BDX */
3230 	case INTEL_FAM6_BROADWELL_XEON_D:	/* BDX-DE */
3231 	case INTEL_FAM6_SKYLAKE_X:	/* SKX */
3232 	case INTEL_FAM6_XEON_PHI_KNL:	/* KNL */
3233 	case INTEL_FAM6_XEON_PHI_KNM:
3234 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
3235 		BIC_PRESENT(BIC_PKG__);
3236 		BIC_PRESENT(BIC_RAM__);
3237 		if (rapl_joules) {
3238 			BIC_PRESENT(BIC_Pkg_J);
3239 			BIC_PRESENT(BIC_RAM_J);
3240 		} else {
3241 			BIC_PRESENT(BIC_PkgWatt);
3242 			BIC_PRESENT(BIC_RAMWatt);
3243 		}
3244 		break;
3245 	case INTEL_FAM6_SANDYBRIDGE_X:
3246 	case INTEL_FAM6_IVYBRIDGE_X:
3247 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
3248 		BIC_PRESENT(BIC_PKG__);
3249 		BIC_PRESENT(BIC_RAM__);
3250 		if (rapl_joules) {
3251 			BIC_PRESENT(BIC_Pkg_J);
3252 			BIC_PRESENT(BIC_Cor_J);
3253 			BIC_PRESENT(BIC_RAM_J);
3254 		} else {
3255 			BIC_PRESENT(BIC_PkgWatt);
3256 			BIC_PRESENT(BIC_CorWatt);
3257 			BIC_PRESENT(BIC_RAMWatt);
3258 		}
3259 		break;
3260 	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
3261 	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
3262 		do_rapl = RAPL_PKG | RAPL_CORES;
3263 		if (rapl_joules) {
3264 			BIC_PRESENT(BIC_Pkg_J);
3265 			BIC_PRESENT(BIC_Cor_J);
3266 		} else {
3267 			BIC_PRESENT(BIC_PkgWatt);
3268 			BIC_PRESENT(BIC_CorWatt);
3269 		}
3270 		break;
3271 	case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
3272 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
3273 		BIC_PRESENT(BIC_PKG__);
3274 		BIC_PRESENT(BIC_RAM__);
3275 		if (rapl_joules) {
3276 			BIC_PRESENT(BIC_Pkg_J);
3277 			BIC_PRESENT(BIC_Cor_J);
3278 			BIC_PRESENT(BIC_RAM_J);
3279 		} else {
3280 			BIC_PRESENT(BIC_PkgWatt);
3281 			BIC_PRESENT(BIC_CorWatt);
3282 			BIC_PRESENT(BIC_RAMWatt);
3283 		}
3284 		break;
3285 	default:
3286 		return;
3287 	}
3288 
3289 	/* units on package 0, verify later other packages match */
3290 	if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
3291 		return;
3292 
3293 	rapl_power_units = 1.0 / (1 << (msr & 0xF));
3294 	if (model == INTEL_FAM6_ATOM_SILVERMONT1)
3295 		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
3296 	else
3297 		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
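	/* e.g. an energy-status-unit field of 16 gives 1.0/2^16 J, about 15.3 uJ per count */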
3298 
3299 	rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
3300 
3301 	time_unit = msr >> 16 & 0xF;
3302 	if (time_unit == 0)
3303 		time_unit = 0xA;
3304 
3305 	rapl_time_units = 1.0 / (1 << (time_unit));
3306 
3307 	tdp = get_tdp(model);
3308 
3309 	rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
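	/*
	 * The 32-bit RAPL energy-status counters wrap after
	 * 0xFFFFFFFF * rapl_energy_units Joules; at a steady draw of
	 * "tdp" Watts, that is the number of seconds reported below.
	 */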
3310 	if (!quiet)
3311 		fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
3312 
3313 	return;
3314 }
3315 
3316 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
3317 {
3318 	if (!genuine_intel)
3319 		return;
3320 
3321 	if (family != 6)
3322 		return;
3323 
3324 	switch (model) {
3325 	case INTEL_FAM6_HASWELL_CORE:	/* HSW */
3326 	case INTEL_FAM6_HASWELL_ULT:	/* HSW */
3327 	case INTEL_FAM6_HASWELL_GT3E:	/* HSW */
3328 		do_gfx_perf_limit_reasons = 1;
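		/* FALLTHRU */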
3329 	case INTEL_FAM6_HASWELL_X:	/* HSX */
3330 		do_core_perf_limit_reasons = 1;
3331 		do_ring_perf_limit_reasons = 1;
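		/* FALLTHRU */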
3332 	default:
3333 		return;
3334 	}
3335 }
3336 
3337 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3338 {
3339 	unsigned long long msr;
	unsigned int dts, dts2;
3341 	int cpu;
3342 
3343 	if (!(do_dts || do_ptm))
3344 		return 0;
3345 
3346 	cpu = t->cpu_id;
3347 
3348 	/* DTS is per-core, no need to print for each thread */
3349 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
3350 		return 0;
3351 
3352 	if (cpu_migrate(cpu)) {
3353 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3354 		return -1;
3355 	}
3356 
3357 	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
3358 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
3359 			return 0;
3360 
3361 		dts = (msr >> 16) & 0x7F;
3362 		fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
3363 			cpu, msr, tcc_activation_temp - dts);
3364 
3365 #ifdef	THERM_DEBUG
3366 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
3367 			return 0;
3368 
3369 		dts = (msr >> 16) & 0x7F;
3370 		dts2 = (msr >> 8) & 0x7F;
3371 		fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3372 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3373 #endif
3374 	}
3375 
3376 
3377 	if (do_dts) {
3378 		unsigned int resolution;
3379 
3380 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
3381 			return 0;
3382 
3383 		dts = (msr >> 16) & 0x7F;
3384 		resolution = (msr >> 27) & 0xF;
3385 		fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
3386 			cpu, msr, tcc_activation_temp - dts, resolution);
3387 
3388 #ifdef THERM_DEBUG
3389 		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
3390 			return 0;
3391 
3392 		dts = (msr >> 16) & 0x7F;
3393 		dts2 = (msr >> 8) & 0x7F;
3394 		fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3395 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3396 #endif
3397 	}
3398 
3399 	return 0;
3400 }
3401 
3402 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
3403 {
3404 	fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
3405 		cpu, label,
3406 		((msr >> 15) & 1) ? "EN" : "DIS",
3407 		((msr >> 0) & 0x7FFF) * rapl_power_units,
3408 		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
3409 		(((msr >> 16) & 1) ? "EN" : "DIS"));
3410 
3411 	return;
3412 }
3413 
3414 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3415 {
3416 	unsigned long long msr;
3417 	int cpu;
3418 
3419 	if (!do_rapl)
3420 		return 0;
3421 
3422 	/* RAPL counters are per package, so print only for 1st thread/package */
3423 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3424 		return 0;
3425 
3426 	cpu = t->cpu_id;
3427 	if (cpu_migrate(cpu)) {
3428 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3429 		return -1;
3430 	}
3431 
3432 	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
3433 		return -1;
3434 
3435 	fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
3436 		rapl_power_units, rapl_energy_units, rapl_time_units);
3437 
3438 	if (do_rapl & RAPL_PKG_POWER_INFO) {
3439 
3440 		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
			return -5;

3444 		fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
3445 			cpu, msr,
3446 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3447 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3448 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3449 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
3450 
3451 	}
3452 	if (do_rapl & RAPL_PKG) {
3453 
3454 		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
3455 			return -9;
3456 
3457 		fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
3458 			cpu, msr, (msr >> 63) & 1 ? "" : "UN");
3459 
3460 		print_power_limit_msr(cpu, msr, "PKG Limit #1");
3461 		fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
3462 			cpu,
3463 			((msr >> 47) & 1) ? "EN" : "DIS",
3464 			((msr >> 32) & 0x7FFF) * rapl_power_units,
3465 			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
3466 			((msr >> 48) & 1) ? "EN" : "DIS");
3467 	}
3468 
3469 	if (do_rapl & RAPL_DRAM_POWER_INFO) {
3470 		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
			return -6;
3472 
3473 		fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
3474 			cpu, msr,
3475 			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3476 			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3477 			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3478 			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
3479 	}
3480 	if (do_rapl & RAPL_DRAM) {
3481 		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
3482 			return -9;
3483 		fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
3484 				cpu, msr, (msr >> 31) & 1 ? "" : "UN");
3485 
3486 		print_power_limit_msr(cpu, msr, "DRAM Limit");
3487 	}
3488 	if (do_rapl & RAPL_CORE_POLICY) {
3489 		if (get_msr(cpu, MSR_PP0_POLICY, &msr))
3490 			return -7;
3491 
3492 		fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
3493 	}
3494 	if (do_rapl & RAPL_CORES_POWER_LIMIT) {
3495 		if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
3496 			return -9;
3497 		fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
3498 				cpu, msr, (msr >> 31) & 1 ? "" : "UN");
3499 		print_power_limit_msr(cpu, msr, "Cores Limit");
3500 	}
3501 	if (do_rapl & RAPL_GFX) {
3502 		if (get_msr(cpu, MSR_PP1_POLICY, &msr))
3503 			return -8;
3504 
3505 		fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
3506 
3507 		if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
3508 			return -9;
3509 		fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
3510 				cpu, msr, (msr >> 31) & 1 ? "" : "UN");
3511 		print_power_limit_msr(cpu, msr, "GFX Limit");
3512 	}
3513 	return 0;
3514 }
3515 
3516 /*
3517  * SNB adds support for additional MSRs:
3518  *
3519  * MSR_PKG_C7_RESIDENCY            0x000003fa
3520  * MSR_CORE_C7_RESIDENCY           0x000003fe
3521  * MSR_PKG_C2_RESIDENCY            0x0000060d
3522  */
3523 
3524 int has_snb_msrs(unsigned int family, unsigned int model)
3525 {
3526 	if (!genuine_intel)
3527 		return 0;
3528 
3529 	switch (model) {
3530 	case INTEL_FAM6_SANDYBRIDGE:
3531 	case INTEL_FAM6_SANDYBRIDGE_X:
3532 	case INTEL_FAM6_IVYBRIDGE:	/* IVB */
3533 	case INTEL_FAM6_IVYBRIDGE_X:	/* IVB Xeon */
3534 	case INTEL_FAM6_HASWELL_CORE:	/* HSW */
	case INTEL_FAM6_HASWELL_X:	/* HSX */
3536 	case INTEL_FAM6_HASWELL_ULT:	/* HSW */
3537 	case INTEL_FAM6_HASWELL_GT3E:	/* HSW */
3538 	case INTEL_FAM6_BROADWELL_CORE:	/* BDW */
3539 	case INTEL_FAM6_BROADWELL_GT3E:	/* BDW */
3540 	case INTEL_FAM6_BROADWELL_X:	/* BDX */
3541 	case INTEL_FAM6_BROADWELL_XEON_D:	/* BDX-DE */
3542 	case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
3543 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
3544 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
3545 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
3546 	case INTEL_FAM6_SKYLAKE_X:	/* SKX */
3547 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
3548 	case INTEL_FAM6_ATOM_GEMINI_LAKE:
3549 	case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
3550 		return 1;
3551 	}
3552 	return 0;
3553 }
3554 
3555 /*
3556  * HSW adds support for additional MSRs:
3557  *
3558  * MSR_PKG_C8_RESIDENCY		0x00000630
3559  * MSR_PKG_C9_RESIDENCY		0x00000631
3560  * MSR_PKG_C10_RESIDENCY	0x00000632
3561  *
3562  * MSR_PKGC8_IRTL		0x00000633
3563  * MSR_PKGC9_IRTL		0x00000634
3564  * MSR_PKGC10_IRTL		0x00000635
3565  *
3566  */
3567 int has_hsw_msrs(unsigned int family, unsigned int model)
3568 {
3569 	if (!genuine_intel)
3570 		return 0;
3571 
3572 	switch (model) {
3573 	case INTEL_FAM6_HASWELL_ULT:	/* HSW */
3574 	case INTEL_FAM6_BROADWELL_CORE:	/* BDW */
3575 	case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
3576 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
3577 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
3578 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
3579 	case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
3580 	case INTEL_FAM6_ATOM_GEMINI_LAKE:
3581 		return 1;
3582 	}
3583 	return 0;
3584 }
3585 
3586 /*
3587  * SKL adds support for additional MSRS:
3588  *
3589  * MSR_PKG_WEIGHTED_CORE_C0_RES    0x00000658
3590  * MSR_PKG_ANY_CORE_C0_RES         0x00000659
3591  * MSR_PKG_ANY_GFXE_C0_RES         0x0000065A
3592  * MSR_PKG_BOTH_CORE_GFXE_C0_RES   0x0000065B
3593  */
3594 int has_skl_msrs(unsigned int family, unsigned int model)
3595 {
3596 	if (!genuine_intel)
3597 		return 0;
3598 
3599 	switch (model) {
3600 	case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
3601 	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
3602 	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
3603 	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
3604 		return 1;
3605 	}
3606 	return 0;
3607 }
3608 
3609 int is_slm(unsigned int family, unsigned int model)
3610 {
3611 	if (!genuine_intel)
3612 		return 0;
3613 	switch (model) {
3614 	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
3615 	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
3616 		return 1;
3617 	}
3618 	return 0;
3619 }
3620 
3621 int is_knl(unsigned int family, unsigned int model)
3622 {
3623 	if (!genuine_intel)
3624 		return 0;
3625 	switch (model) {
3626 	case INTEL_FAM6_XEON_PHI_KNL:	/* KNL */
3627 	case INTEL_FAM6_XEON_PHI_KNM:
3628 		return 1;
3629 	}
3630 	return 0;
3631 }
3632 
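/*
 * A note on the KNL-only multiplier returned below: KNL is understood
 * to update APERF and MPERF only once every 1024 clocks, so turbostat
 * scales those counters back up by 1024.
 */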
3633 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
3634 {
3635 	if (is_knl(family, model))
3636 		return 1024;
3637 	return 1;
3638 }
3639 
3640 #define SLM_BCLK_FREQS 5
3641 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
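/*
 * slm_bclk() below indexes this table with MSR_FSB_FREQ[3:0];
 * e.g. a readout of 2 selects 133.3 MHz, and an out-of-range
 * index falls back to entry 3 (116.7 MHz).
 */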
3642 
3643 double slm_bclk(void)
3644 {
3645 	unsigned long long msr = 3;
3646 	unsigned int i;
3647 	double freq;
3648 
3649 	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
3650 		fprintf(outf, "SLM BCLK: unknown\n");
3651 
3652 	i = msr & 0xf;
3653 	if (i >= SLM_BCLK_FREQS) {
3654 		fprintf(outf, "SLM BCLK[%d] invalid\n", i);
3655 		i = 3;
3656 	}
3657 	freq = slm_freq_table[i];
3658 
3659 	if (!quiet)
		fprintf(outf, "SLM BCLK: %.1f MHz\n", freq);
3661 
3662 	return freq;
3663 }
3664 
3665 double discover_bclk(unsigned int family, unsigned int model)
3666 {
3667 	if (has_snb_msrs(family, model) || is_knl(family, model))
3668 		return 100.00;
3669 	else if (is_slm(family, model))
3670 		return slm_bclk();
3671 	else
3672 		return 133.33;
3673 }
3674 
3675 /*
3676  * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
3677  * the Thermal Control Circuit (TCC) activates.
3678  * This is usually equal to tjMax.
3679  *
3680  * Older processors do not have this MSR, so there we guess,
3681  * but also allow cmdline over-ride with -T.
3682  *
3683  * Several MSR temperature values are in units of degrees-C
3684  * below this value, including the Digital Thermal Sensor (DTS),
3685  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
3686  */
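/*
 * Worked example (illustrative numbers): with tjMax = 100 C and a DTS
 * readout of 38 degrees below target, the reported core temperature
 * is 100 - 38 = 62 C.
 */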
3687 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3688 {
3689 	unsigned long long msr;
3690 	unsigned int target_c_local;
3691 	int cpu;
3692 
3693 	/* tcc_activation_temp is used only for dts or ptm */
3694 	if (!(do_dts || do_ptm))
3695 		return 0;
3696 
3697 	/* this is a per-package concept */
3698 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3699 		return 0;
3700 
3701 	cpu = t->cpu_id;
3702 	if (cpu_migrate(cpu)) {
3703 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3704 		return -1;
3705 	}
3706 
3707 	if (tcc_activation_temp_override != 0) {
3708 		tcc_activation_temp = tcc_activation_temp_override;
3709 		fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
3710 			cpu, tcc_activation_temp);
3711 		return 0;
3712 	}
3713 
3714 	/* Temperature Target MSR is Nehalem and newer only */
3715 	if (!do_nhm_platform_info)
3716 		goto guess;
3717 
3718 	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
3719 		goto guess;
3720 
3721 	target_c_local = (msr >> 16) & 0xFF;
3722 
3723 	if (!quiet)
3724 		fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
3725 			cpu, msr, target_c_local);
3726 
3727 	if (!target_c_local)
3728 		goto guess;
3729 
3730 	tcc_activation_temp = target_c_local;
3731 
3732 	return 0;
3733 
3734 guess:
3735 	tcc_activation_temp = TJMAX_DEFAULT;
	fprintf(outf, "cpu%d: Guessing tjMax %d C, please use -T to specify\n",
3737 		cpu, tcc_activation_temp);
3738 
3739 	return 0;
3740 }
3741 
3742 void decode_feature_control_msr(void)
3743 {
3744 	unsigned long long msr;
3745 
3746 	if (!get_msr(base_cpu, MSR_IA32_FEATURE_CONTROL, &msr))
3747 		fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n",
3748 			base_cpu, msr,
3749 			msr & FEATURE_CONTROL_LOCKED ? "" : "UN-",
3750 			msr & (1 << 18) ? "SGX" : "");
3751 }
3752 
3753 void decode_misc_enable_msr(void)
3754 {
3755 	unsigned long long msr;
3756 
3757 	if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
3758 		fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n",
3759 			base_cpu, msr,
3760 			msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-",
3761 			msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-",
			msr & MSR_IA32_MISC_ENABLE_MWAIT ? "" : "No-",
3763 			msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "",
3764 			msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : "");
3765 }
3766 
3767 void decode_misc_feature_control(void)
3768 {
3769 	unsigned long long msr;
3770 
3771 	if (!has_misc_feature_control)
3772 		return;
3773 
3774 	if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr))
3775 		fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n",
3776 			base_cpu, msr,
			msr & (1 << 0) ? "No-" : "",
			msr & (1 << 1) ? "No-" : "",
			msr & (1 << 2) ? "No-" : "",
			msr & (1 << 3) ? "No-" : "");
3781 }
3782 /*
3783  * Decode MSR_MISC_PWR_MGMT
3784  *
3785  * Decode the bits according to the Nehalem documentation
3786  * bit[0] seems to continue to have same meaning going forward
3787  * bit[1] less so...
3788  */
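/*
 * Example decode, using the bit tests below: a raw value of 0x2
 * (bit[0]=0, bit[1]=1, bit[8]=0) prints
 * "ENable-EIST_Coordination ENable-EPB DISable-OOB".
 */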
3789 void decode_misc_pwr_mgmt_msr(void)
3790 {
3791 	unsigned long long msr;
3792 
3793 	if (!do_nhm_platform_info)
3794 		return;
3795 
3796 	if (no_MSR_MISC_PWR_MGMT)
3797 		return;
3798 
3799 	if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr))
3800 		fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n",
3801 			base_cpu, msr,
3802 			msr & (1 << 0) ? "DIS" : "EN",
3803 			msr & (1 << 1) ? "EN" : "DIS",
3804 			msr & (1 << 8) ? "EN" : "DIS");
3805 }
3806 /*
3807  * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG
3808  *
 * These MSRs are present on Silvermont processors,
 * Intel Atom processor E3000 series (Baytrail), and friends.
3811  */
3812 void decode_c6_demotion_policy_msr(void)
3813 {
3814 	unsigned long long msr;
3815 
3816 	if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr))
3817 		fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n",
3818 			base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
3819 
3820 	if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr))
3821 		fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n",
3822 			base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
3823 }
3824 
3825 void process_cpuid()
3826 {
3827 	unsigned int eax, ebx, ecx, edx, max_level, max_extended_level;
3828 	unsigned int fms, family, model, stepping;
3829 	unsigned int has_turbo;
3830 
3831 	eax = ebx = ecx = edx = 0;
3832 
3833 	__cpuid(0, max_level, ebx, ecx, edx);
3834 
3835 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
3836 		genuine_intel = 1;
3837 
3838 	if (!quiet)
3839 		fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
3840 			(char *)&ebx, (char *)&edx, (char *)&ecx);
3841 
3842 	__cpuid(1, fms, ebx, ecx, edx);
3843 	family = (fms >> 8) & 0xf;
3844 	model = (fms >> 4) & 0xf;
3845 	stepping = fms & 0xf;
3846 	if (family == 6 || family == 0xf)
3847 		model += ((fms >> 16) & 0xf) << 4;
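	/*
	 * Example: fms 0x000406e3 decodes to family 6, model 0x4e (78,
	 * INTEL_FAM6_SKYLAKE_MOBILE), stepping 3.
	 */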
3848 
3849 	if (!quiet) {
3850 		fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
3851 			max_level, family, model, stepping, family, model, stepping);
3852 		fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
3853 			ecx & (1 << 0) ? "SSE3" : "-",
3854 			ecx & (1 << 3) ? "MONITOR" : "-",
3855 			ecx & (1 << 6) ? "SMX" : "-",
3856 			ecx & (1 << 7) ? "EIST" : "-",
3857 			ecx & (1 << 8) ? "TM2" : "-",
3858 			edx & (1 << 4) ? "TSC" : "-",
3859 			edx & (1 << 5) ? "MSR" : "-",
3860 			edx & (1 << 22) ? "ACPI-TM" : "-",
3861 			edx & (1 << 29) ? "TM" : "-");
3862 	}
3863 
3864 	if (!(edx & (1 << 5)))
3865 		errx(1, "CPUID: no MSR");
3866 
3867 	/*
3868 	 * check max extended function levels of CPUID.
3869 	 * This is needed to check for invariant TSC.
3870 	 * This check is valid for both Intel and AMD.
3871 	 */
3872 	ebx = ecx = edx = 0;
3873 	__cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
3874 
3875 	if (max_extended_level >= 0x80000007) {
3876 
3877 		/*
3878 		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
3879 		 * this check is valid for both Intel and AMD
3880 		 */
3881 		__cpuid(0x80000007, eax, ebx, ecx, edx);
3882 		has_invariant_tsc = edx & (1 << 8);
3883 	}
3884 
3885 	/*
3886 	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
3887 	 * this check is valid for both Intel and AMD
3888 	 */
3889 
3890 	__cpuid(0x6, eax, ebx, ecx, edx);
3891 	has_aperf = ecx & (1 << 0);
3892 	if (has_aperf) {
3893 		BIC_PRESENT(BIC_Avg_MHz);
3894 		BIC_PRESENT(BIC_Busy);
3895 		BIC_PRESENT(BIC_Bzy_MHz);
3896 	}
3897 	do_dts = eax & (1 << 0);
3898 	if (do_dts)
3899 		BIC_PRESENT(BIC_CoreTmp);
3900 	has_turbo = eax & (1 << 1);
3901 	do_ptm = eax & (1 << 6);
3902 	if (do_ptm)
3903 		BIC_PRESENT(BIC_PkgTmp);
3904 	has_hwp = eax & (1 << 7);
3905 	has_hwp_notify = eax & (1 << 8);
3906 	has_hwp_activity_window = eax & (1 << 9);
3907 	has_hwp_epp = eax & (1 << 10);
3908 	has_hwp_pkg = eax & (1 << 11);
3909 	has_epb = ecx & (1 << 3);
3910 
3911 	if (!quiet)
3912 		fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, "
3913 			"%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
3914 			has_aperf ? "" : "No-",
3915 			has_turbo ? "" : "No-",
3916 			do_dts ? "" : "No-",
3917 			do_ptm ? "" : "No-",
3918 			has_hwp ? "" : "No-",
3919 			has_hwp_notify ? "" : "No-",
3920 			has_hwp_activity_window ? "" : "No-",
3921 			has_hwp_epp ? "" : "No-",
3922 			has_hwp_pkg ? "" : "No-",
3923 			has_epb ? "" : "No-");
3924 
3925 	if (!quiet)
3926 		decode_misc_enable_msr();
3927 
3928 
3929 	if (max_level >= 0x7 && !quiet) {
3930 		int has_sgx;
3931 
3932 		ecx = 0;
3933 
3934 		__cpuid_count(0x7, 0, eax, ebx, ecx, edx);
3935 
3936 		has_sgx = ebx & (1 << 2);
3937 		fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-");
3938 
3939 		if (has_sgx)
3940 			decode_feature_control_msr();
3941 	}
3942 
3943 	if (max_level >= 0x15) {
3944 		unsigned int eax_crystal;
3945 		unsigned int ebx_tsc;
3946 
3947 		/*
3948 		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
3949 		 */
3950 		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
3951 		__cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx);
3952 
3953 		if (ebx_tsc != 0) {
3954 
			if (!quiet)
3956 				fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
3957 					eax_crystal, ebx_tsc, crystal_hz);
3958 
3959 			if (crystal_hz == 0)
				switch (model) {
3961 				case INTEL_FAM6_SKYLAKE_MOBILE:	/* SKL */
3962 				case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
3963 				case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
3964 				case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
3965 					crystal_hz = 24000000;	/* 24.0 MHz */
3966 					break;
3967 				case INTEL_FAM6_SKYLAKE_X:	/* SKX */
3968 				case INTEL_FAM6_ATOM_DENVERTON:	/* DNV */
3969 					crystal_hz = 25000000;	/* 25.0 MHz */
3970 					break;
3971 				case INTEL_FAM6_ATOM_GOLDMONT:	/* BXT */
3972 				case INTEL_FAM6_ATOM_GEMINI_LAKE:
3973 					crystal_hz = 19200000;	/* 19.2 MHz */
3974 					break;
3975 				default:
3976 					crystal_hz = 0;
				}
3978 
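			/*
			 * TSC Hz = crystal Hz * CPUID.15H:EBX / CPUID.15H:EAX;
			 * e.g. a 24 MHz crystal with a 2:200 ratio gives
			 * 24000000 * 200 / 2 = 2400 MHz.
			 */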
3979 			if (crystal_hz) {
				tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
3981 				if (!quiet)
3982 					fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
3983 						tsc_hz / 1000000, crystal_hz, ebx_tsc,  eax_crystal);
3984 			}
3985 		}
3986 	}
3987 	if (max_level >= 0x16) {
3988 		unsigned int base_mhz, max_mhz, bus_mhz, edx;
3989 
3990 		/*
3991 		 * CPUID 16H Base MHz, Max MHz, Bus MHz
3992 		 */
3993 		base_mhz = max_mhz = bus_mhz = edx = 0;
3994 
3995 		__cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx);
3996 		if (!quiet)
3997 			fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
3998 				base_mhz, max_mhz, bus_mhz);
3999 	}
4000 
4001 	if (has_aperf)
4002 		aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
4003 
4004 	BIC_PRESENT(BIC_IRQ);
4005 	BIC_PRESENT(BIC_TSC_MHz);
4006 
4007 	if (probe_nhm_msrs(family, model)) {
4008 		do_nhm_platform_info = 1;
4009 		BIC_PRESENT(BIC_CPU_c1);
4010 		BIC_PRESENT(BIC_CPU_c3);
4011 		BIC_PRESENT(BIC_CPU_c6);
4012 		BIC_PRESENT(BIC_SMI);
4013 	}
4014 	do_snb_cstates = has_snb_msrs(family, model);
4015 
4016 	if (do_snb_cstates)
4017 		BIC_PRESENT(BIC_CPU_c7);
4018 
4019 	do_irtl_snb = has_snb_msrs(family, model);
4020 	if (do_snb_cstates && (pkg_cstate_limit >= PCL__2))
4021 		BIC_PRESENT(BIC_Pkgpc2);
4022 	if (pkg_cstate_limit >= PCL__3)
4023 		BIC_PRESENT(BIC_Pkgpc3);
4024 	if (pkg_cstate_limit >= PCL__6)
4025 		BIC_PRESENT(BIC_Pkgpc6);
4026 	if (do_snb_cstates && (pkg_cstate_limit >= PCL__7))
4027 		BIC_PRESENT(BIC_Pkgpc7);
4028 	if (has_slv_msrs(family, model)) {
4029 		BIC_NOT_PRESENT(BIC_Pkgpc2);
4030 		BIC_NOT_PRESENT(BIC_Pkgpc3);
4031 		BIC_PRESENT(BIC_Pkgpc6);
4032 		BIC_NOT_PRESENT(BIC_Pkgpc7);
4033 		BIC_PRESENT(BIC_Mod_c6);
4034 		use_c1_residency_msr = 1;
4035 	}
4036 	if (is_dnv(family, model)) {
4037 		BIC_PRESENT(BIC_CPU_c1);
4038 		BIC_NOT_PRESENT(BIC_CPU_c3);
4039 		BIC_NOT_PRESENT(BIC_Pkgpc3);
4040 		BIC_NOT_PRESENT(BIC_CPU_c7);
4041 		BIC_NOT_PRESENT(BIC_Pkgpc7);
4042 		use_c1_residency_msr = 1;
4043 	}
4044 	if (is_skx(family, model)) {
4045 		BIC_NOT_PRESENT(BIC_CPU_c3);
4046 		BIC_NOT_PRESENT(BIC_Pkgpc3);
4047 		BIC_NOT_PRESENT(BIC_CPU_c7);
4048 		BIC_NOT_PRESENT(BIC_Pkgpc7);
4049 	}
4050 	if (is_bdx(family, model)) {
4051 		BIC_NOT_PRESENT(BIC_CPU_c7);
4052 		BIC_NOT_PRESENT(BIC_Pkgpc7);
4053 	}
4054 	if (has_hsw_msrs(family, model)) {
4055 		BIC_PRESENT(BIC_Pkgpc8);
4056 		BIC_PRESENT(BIC_Pkgpc9);
4057 		BIC_PRESENT(BIC_Pkgpc10);
4058 	}
4059 	do_irtl_hsw = has_hsw_msrs(family, model);
4060 	do_skl_residency = has_skl_msrs(family, model);
4061 	do_slm_cstates = is_slm(family, model);
4062 	do_knl_cstates  = is_knl(family, model);
4063 
4064 	if (!quiet)
4065 		decode_misc_pwr_mgmt_msr();
4066 
4067 	if (!quiet && has_slv_msrs(family, model))
4068 		decode_c6_demotion_policy_msr();
4069 
4070 	rapl_probe(family, model);
4071 	perf_limit_reasons_probe(family, model);
4072 
4073 	if (!quiet)
4074 		dump_cstate_pstate_config_info(family, model);
4075 
4076 	if (!quiet)
4077 		dump_sysfs_cstate_config();
4078 
4079 	if (has_skl_msrs(family, model))
4080 		calculate_tsc_tweak();
4081 
4082 	if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK))
4083 		BIC_PRESENT(BIC_GFX_rc6);
4084 
4085 	if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK))
4086 		BIC_PRESENT(BIC_GFXMHz);
4087 
4088 	if (!quiet)
4089 		decode_misc_feature_control();
4090 
4091 	return;
4092 }
4093 
4094 void help()
4095 {
4096 	fprintf(outf,
4097 	"Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
4098 	"\n"
4099 	"Turbostat forks the specified COMMAND and prints statistics\n"
4100 	"when COMMAND completes.\n"
	"If no COMMAND is specified, turbostat wakes every 5 seconds\n"
	"to print statistics, until interrupted.\n"
	"--add		add a counter\n"
	"		e.g. --add msr0x10,u64,cpu,delta,MY_TSC\n"
	"--cpu	cpu-set	limit output to summary plus cpu-set\n"
4106 	"--quiet	skip decoding system configuration header\n"
4107 	"--interval sec	Override default 5-second measurement interval\n"
4108 	"--help		print this help message\n"
4109 	"--out file	create or truncate \"file\" for all output\n"
4110 	"--version	print version information\n"
4111 	"\n"
4112 	"For more help, run \"man turbostat\"\n");
4113 }
4114 
4115 
/*
 * in /dev/cpu/, return success for names that are numbers,
 * i.e. filter out ".", "..", "microcode".
 */
4120 int dir_filter(const struct dirent *dirp)
4121 {
4122 	if (isdigit(dirp->d_name[0]))
4123 		return 1;
4124 	else
4125 		return 0;
4126 }
4127 
4128 int open_dev_cpu_msr(int dummy1)
4129 {
4130 	return 0;
4131 }
4132 
4133 void topology_probe()
4134 {
4135 	int i;
4136 	int max_core_id = 0;
4137 	int max_package_id = 0;
4138 	int max_siblings = 0;
4139 	struct cpu_topology {
4140 		int core_id;
4141 		int physical_package_id;
4142 	} *cpus;
4143 
4144 	/* Initialize num_cpus, max_cpu_num */
4145 	topo.num_cpus = 0;
4146 	topo.max_cpu_num = 0;
4147 	for_all_proc_cpus(count_cpus);
4148 	if (!summary_only && topo.num_cpus > 1)
4149 		BIC_PRESENT(BIC_CPU);
4150 
4151 	if (debug > 1)
4152 		fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
4153 
	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
4155 	if (cpus == NULL)
4156 		err(1, "calloc cpus");
4157 
4158 	/*
4159 	 * Allocate and initialize cpu_present_set
4160 	 */
4161 	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
4162 	if (cpu_present_set == NULL)
4163 		err(3, "CPU_ALLOC");
4164 	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
4165 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
4166 	for_all_proc_cpus(mark_cpu_present);
4167 
4168 	/*
4169 	 * Validate that all cpus in cpu_subset are also in cpu_present_set
4170 	 */
4171 	for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) {
4172 		if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset))
4173 			if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set))
4174 				err(1, "cpu%d not present", i);
4175 	}
4176 
4177 	/*
4178 	 * Allocate and initialize cpu_affinity_set
4179 	 */
4180 	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
4181 	if (cpu_affinity_set == NULL)
4182 		err(3, "CPU_ALLOC");
4183 	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
4184 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
4185 
4186 
4187 	/*
4188 	 * For online cpus
4189 	 * find max_core_id, max_package_id
4190 	 */
4191 	for (i = 0; i <= topo.max_cpu_num; ++i) {
4192 		int siblings;
4193 
4194 		if (cpu_is_not_present(i)) {
4195 			if (debug > 1)
4196 				fprintf(outf, "cpu%d NOT PRESENT\n", i);
4197 			continue;
4198 		}
4199 		cpus[i].core_id = get_core_id(i);
4200 		if (cpus[i].core_id > max_core_id)
4201 			max_core_id = cpus[i].core_id;
4202 
4203 		cpus[i].physical_package_id = get_physical_package_id(i);
4204 		if (cpus[i].physical_package_id > max_package_id)
4205 			max_package_id = cpus[i].physical_package_id;
4206 
4207 		siblings = get_num_ht_siblings(i);
4208 		if (siblings > max_siblings)
4209 			max_siblings = siblings;
4210 		if (debug > 1)
4211 			fprintf(outf, "cpu %d pkg %d core %d\n",
4212 				i, cpus[i].physical_package_id, cpus[i].core_id);
4213 	}
4214 	topo.num_cores_per_pkg = max_core_id + 1;
4215 	if (debug > 1)
4216 		fprintf(outf, "max_core_id %d, sizing for %d cores per package\n",
4217 			max_core_id, topo.num_cores_per_pkg);
4218 	if (!summary_only && topo.num_cores_per_pkg > 1)
4219 		BIC_PRESENT(BIC_Core);
4220 
4221 	topo.num_packages = max_package_id + 1;
4222 	if (debug > 1)
4223 		fprintf(outf, "max_package_id %d, sizing for %d packages\n",
4224 			max_package_id, topo.num_packages);
4225 	if (debug && !summary_only && topo.num_packages > 1)
4226 		BIC_PRESENT(BIC_Package);
4227 
4228 	topo.num_threads_per_core = max_siblings;
4229 	if (debug > 1)
4230 		fprintf(outf, "max_siblings %d\n", max_siblings);
4231 
4232 	free(cpus);
4233 }
4234 
4235 void
4236 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
4237 {
4238 	int i;
4239 
4240 	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
4241 		topo.num_packages, sizeof(struct thread_data));
4242 	if (*t == NULL)
4243 		goto error;
4244 
4245 	for (i = 0; i < topo.num_threads_per_core *
4246 		topo.num_cores_per_pkg * topo.num_packages; i++)
4247 		(*t)[i].cpu_id = -1;
4248 
4249 	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
4250 		sizeof(struct core_data));
4251 	if (*c == NULL)
4252 		goto error;
4253 
4254 	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
4255 		(*c)[i].core_id = -1;
4256 
4257 	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
4258 	if (*p == NULL)
4259 		goto error;
4260 
4261 	for (i = 0; i < topo.num_packages; i++)
4262 		(*p)[i].package_id = i;
4263 
4264 	return;
4265 error:
4266 	err(1, "calloc counters");
4267 }
4268 /*
4269  * init_counter()
4270  *
4271  * set cpu_id, core_num, pkg_num
4272  * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
4273  *
4274  * increment topo.num_cores when 1st core in pkg seen
4275  */
4276 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
4277 	struct pkg_data *pkg_base, int thread_num, int core_num,
4278 	int pkg_num, int cpu_id)
4279 {
4280 	struct thread_data *t;
4281 	struct core_data *c;
4282 	struct pkg_data *p;
4283 
4284 	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
4285 	c = GET_CORE(core_base, core_num, pkg_num);
4286 	p = GET_PKG(pkg_base, pkg_num);
4287 
4288 	t->cpu_id = cpu_id;
4289 	if (thread_num == 0) {
4290 		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
4291 		if (cpu_is_first_core_in_package(cpu_id))
4292 			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
4293 	}
4294 
4295 	c->core_id = core_num;
4296 	p->package_id = pkg_num;
4297 }
4298 
4299 
4300 int initialize_counters(int cpu_id)
4301 {
4302 	int my_thread_id, my_core_id, my_package_id;
4303 
4304 	my_package_id = get_physical_package_id(cpu_id);
4305 	my_core_id = get_core_id(cpu_id);
4306 	my_thread_id = get_cpu_position_in_core(cpu_id);
4307 	if (!my_thread_id)
4308 		topo.num_cores++;
4309 
4310 	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
4311 	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
4312 	return 0;
4313 }
4314 
4315 void allocate_output_buffer()
4316 {
4317 	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
4318 	outp = output_buffer;
4319 	if (outp == NULL)
4320 		err(-1, "calloc output buffer");
4321 }
4322 void allocate_fd_percpu(void)
4323 {
4324 	fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
4325 	if (fd_percpu == NULL)
4326 		err(-1, "calloc fd_percpu");
4327 }
4328 void allocate_irq_buffers(void)
4329 {
4330 	irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int));
4331 	if (irq_column_2_cpu == NULL)
4332 		err(-1, "calloc %d", topo.num_cpus);
4333 
4334 	irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int));
4335 	if (irqs_per_cpu == NULL)
4336 		err(-1, "calloc %d", topo.max_cpu_num + 1);
4337 }
4338 void setup_all_buffers(void)
4339 {
4340 	topology_probe();
4341 	allocate_irq_buffers();
4342 	allocate_fd_percpu();
4343 	allocate_counters(&thread_even, &core_even, &package_even);
4344 	allocate_counters(&thread_odd, &core_odd, &package_odd);
4345 	allocate_output_buffer();
4346 	for_all_proc_cpus(initialize_counters);
4347 }
4348 
4349 void set_base_cpu(void)
4350 {
4351 	base_cpu = sched_getcpu();
4352 	if (base_cpu < 0)
4353 		err(-ENODEV, "No valid cpus found");
4354 
4355 	if (debug > 1)
4356 		fprintf(outf, "base_cpu = %d\n", base_cpu);
4357 }
4358 
4359 void turbostat_init()
4360 {
4361 	setup_all_buffers();
4362 	set_base_cpu();
4363 	check_dev_msr();
4364 	check_permissions();
4365 	process_cpuid();
4366 
4367 
4368 	if (!quiet)
4369 		for_all_cpus(print_hwp, ODD_COUNTERS);
4370 
4371 	if (!quiet)
4372 		for_all_cpus(print_epb, ODD_COUNTERS);
4373 
4374 	if (!quiet)
4375 		for_all_cpus(print_perf_limit, ODD_COUNTERS);
4376 
4377 	if (!quiet)
4378 		for_all_cpus(print_rapl, ODD_COUNTERS);
4379 
4380 	for_all_cpus(set_temperature_target, ODD_COUNTERS);
4381 
4382 	if (!quiet)
4383 		for_all_cpus(print_thermal, ODD_COUNTERS);
4384 
4385 	if (!quiet && do_irtl_snb)
4386 		print_irtl();
4387 }
4388 
4389 int fork_it(char **argv)
4390 {
4391 	pid_t child_pid;
4392 	int status;
4393 
4394 	status = for_all_cpus(get_counters, EVEN_COUNTERS);
4395 	if (status)
4396 		exit(status);
4397 	/* clear affinity side-effect of get_counters() */
4398 	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
4399 	gettimeofday(&tv_even, (struct timezone *)NULL);
4400 
4401 	child_pid = fork();
4402 	if (!child_pid) {
4403 		/* child */
		execvp(argv[0], argv);
		err(errno, "exec %s", argv[0]);
4405 	} else {
4406 
4407 		/* parent */
4408 		if (child_pid == -1)
4409 			err(1, "fork");
4410 
4411 		signal(SIGINT, SIG_IGN);
4412 		signal(SIGQUIT, SIG_IGN);
4413 		if (waitpid(child_pid, &status, 0) == -1)
4414 			err(status, "waitpid");
4415 	}
4416 	/*
4417 	 * n.b. fork_it() does not check for errors from for_all_cpus()
4418 	 * because re-starting is problematic when forking
4419 	 */
4420 	for_all_cpus(get_counters, ODD_COUNTERS);
4421 	gettimeofday(&tv_odd, (struct timezone *)NULL);
4422 	timersub(&tv_odd, &tv_even, &tv_delta);
4423 	if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS))
4424 		fprintf(outf, "%s: Counter reset detected\n", progname);
4425 	else {
4426 		compute_average(EVEN_COUNTERS);
4427 		format_all_counters(EVEN_COUNTERS);
4428 	}
4429 
4430 	fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
4431 
4432 	flush_output_stderr();
4433 
4434 	return status;
4435 }
4436 
4437 int get_and_dump_counters(void)
4438 {
4439 	int status;
4440 
4441 	status = for_all_cpus(get_counters, ODD_COUNTERS);
4442 	if (status)
4443 		return status;
4444 
4445 	status = for_all_cpus(dump_counters, ODD_COUNTERS);
4446 	if (status)
4447 		return status;
4448 
4449 	flush_output_stdout();
4450 
4451 	return status;
4452 }
4453 
void print_version()
{
4455 	fprintf(outf, "turbostat version 4.17 10 Jan 2017"
4456 		" - Len Brown <lenb@kernel.org>\n");
4457 }
4458 
4459 int add_counter(unsigned int msr_num, char *path, char *name,
4460 	unsigned int width, enum counter_scope scope,
4461 	enum counter_type type, enum counter_format format, int flags)
4462 {
4463 	struct msr_counter *msrp;
4464 
4465 	msrp = calloc(1, sizeof(struct msr_counter));
4466 	if (msrp == NULL) {
4467 		perror("calloc");
4468 		exit(1);
4469 	}
4470 
4471 	msrp->msr_num = msr_num;
	strncpy(msrp->name, name, NAME_BYTES - 1);
	if (path)
		strncpy(msrp->path, path, PATH_BYTES - 1);
4475 	msrp->width = width;
4476 	msrp->type = type;
4477 	msrp->format = format;
4478 	msrp->flags = flags;
4479 
4480 	switch (scope) {
4481 
4482 	case SCOPE_CPU:
4483 		msrp->next = sys.tp;
4484 		sys.tp = msrp;
4485 		sys.added_thread_counters++;
4486 		if (sys.added_thread_counters > MAX_ADDED_COUNTERS) {
4487 			fprintf(stderr, "exceeded max %d added thread counters\n",
4488 				MAX_ADDED_COUNTERS);
4489 			exit(-1);
4490 		}
4491 		break;
4492 
4493 	case SCOPE_CORE:
4494 		msrp->next = sys.cp;
4495 		sys.cp = msrp;
4496 		sys.added_core_counters++;
4497 		if (sys.added_core_counters > MAX_ADDED_COUNTERS) {
4498 			fprintf(stderr, "exceeded max %d added core counters\n",
4499 				MAX_ADDED_COUNTERS);
4500 			exit(-1);
4501 		}
4502 		break;
4503 
4504 	case SCOPE_PACKAGE:
4505 		msrp->next = sys.pp;
4506 		sys.pp = msrp;
4507 		sys.added_package_counters++;
4508 		if (sys.added_package_counters > MAX_ADDED_COUNTERS) {
4509 			fprintf(stderr, "exceeded max %d added package counters\n",
4510 				MAX_ADDED_COUNTERS);
4511 			exit(-1);
4512 		}
4513 		break;
4514 	}
4515 
4516 	return 0;
4517 }
4518 
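/*
 * parse_add_command() splits the --add argument on commas; the tokens
 * recognized below are (in any order):
 *	msr0xXXX | msrDDD | /path_to_sysfs_counter
 *	u32 | u64		(width, default u64)
 *	cpu | core | package	(scope, default cpu)
 *	cycles | seconds | usec	(type, default cycles)
 *	raw | delta | percent	(format, default delta)
 *	NAME			(column header, default generated)
 */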
4519 void parse_add_command(char *add_command)
4520 {
4521 	int msr_num = 0;
4522 	char *path = NULL;
4523 	char name_buffer[NAME_BYTES] = "";
4524 	int width = 64;
4525 	int fail = 0;
4526 	enum counter_scope scope = SCOPE_CPU;
4527 	enum counter_type type = COUNTER_CYCLES;
4528 	enum counter_format format = FORMAT_DELTA;
4529 
4530 	while (add_command) {
4531 
4532 		if (sscanf(add_command, "msr0x%x", &msr_num) == 1)
4533 			goto next;
4534 
4535 		if (sscanf(add_command, "msr%d", &msr_num) == 1)
4536 			goto next;
4537 
4538 		if (*add_command == '/') {
4539 			path = add_command;
4540 			goto next;
4541 		}
4542 
4543 		if (sscanf(add_command, "u%d", &width) == 1) {
4544 			if ((width == 32) || (width == 64))
4545 				goto next;
4546 			width = 64;
4547 		}
4548 		if (!strncmp(add_command, "cpu", strlen("cpu"))) {
4549 			scope = SCOPE_CPU;
4550 			goto next;
4551 		}
4552 		if (!strncmp(add_command, "core", strlen("core"))) {
4553 			scope = SCOPE_CORE;
4554 			goto next;
4555 		}
4556 		if (!strncmp(add_command, "package", strlen("package"))) {
4557 			scope = SCOPE_PACKAGE;
4558 			goto next;
4559 		}
4560 		if (!strncmp(add_command, "cycles", strlen("cycles"))) {
4561 			type = COUNTER_CYCLES;
4562 			goto next;
4563 		}
4564 		if (!strncmp(add_command, "seconds", strlen("seconds"))) {
4565 			type = COUNTER_SECONDS;
4566 			goto next;
4567 		}
4568 		if (!strncmp(add_command, "usec", strlen("usec"))) {
4569 			type = COUNTER_USEC;
4570 			goto next;
4571 		}
4572 		if (!strncmp(add_command, "raw", strlen("raw"))) {
4573 			format = FORMAT_RAW;
4574 			goto next;
4575 		}
4576 		if (!strncmp(add_command, "delta", strlen("delta"))) {
4577 			format = FORMAT_DELTA;
4578 			goto next;
4579 		}
4580 		if (!strncmp(add_command, "percent", strlen("percent"))) {
4581 			format = FORMAT_PERCENT;
4582 			goto next;
4583 		}
4584 
4585 		if (sscanf(add_command, "%18s,%*s", name_buffer) == 1) {	/* 18 < NAME_BYTES */
4586 			char *eos;
4587 
4588 			eos = strchr(name_buffer, ',');
4589 			if (eos)
4590 				*eos = '\0';
4591 			goto next;
4592 		}
4593 
4594 next:
4595 		add_command = strchr(add_command, ',');
4596 		if (add_command) {
4597 			*add_command = '\0';
4598 			add_command++;
4599 		}
4600 
4601 	}
4602 	if ((msr_num == 0) && (path == NULL)) {
4603 		fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n");
4604 		fail++;
4605 	}
4606 
4607 	/* generate default column header */
4608 	if (*name_buffer == '\0') {
4609 		if (format == FORMAT_RAW) {
4610 			if (width == 32)
4611 				sprintf(name_buffer, "msr%d", msr_num);
4612 			else
4613 				sprintf(name_buffer, "MSR%d", msr_num);
4614 		} else if (format == FORMAT_DELTA) {
4615 			if (width == 32)
4616 				sprintf(name_buffer, "cnt%d", msr_num);
4617 			else
4618 				sprintf(name_buffer, "CNT%d", msr_num);
4619 		} else if (format == FORMAT_PERCENT) {
4620 			if (width == 32)
4621 				sprintf(name_buffer, "msr%d%%", msr_num);
4622 			else
4623 				sprintf(name_buffer, "MSR%d%%", msr_num);
4624 		}
4625 	}
4626 
4627 	if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0))
4628 		fail++;
4629 
4630 	if (fail) {
4631 		help();
4632 		exit(1);
4633 	}
4634 }
4635 
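/*
 * probe_sysfs() below adds two sysfs-backed counters per cpuidle state:
 * cpuidle/stateN/time as a percent-of-interval column (e.g. "C1%") and
 * cpuidle/stateN/usage as a per-interval delta column (e.g. "C1").
 */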
4636 void probe_sysfs(void)
4637 {
4638 	char path[64];
4639 	char name_buf[16];
4640 	FILE *input;
4641 	int state;
4642 	char *sp;
4643 
4644 	if (!DO_BIC(BIC_sysfs))
4645 		return;
4646 
4647 	for (state = 10; state > 0; --state) {
4648 
4649 		sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
4650 			base_cpu, state);
4651 		input = fopen(path, "r");
4652 		if (input == NULL)
4653 			continue;
4654 		fgets(name_buf, sizeof(name_buf), input);
4655 
		 /* truncate "C1-HSW\n" or "C1\n" to "C1", then append '%' for the residency column */
4657 		sp = strchr(name_buf, '-');
4658 		if (!sp)
4659 			sp = strchrnul(name_buf, '\n');
4660 		*sp = '%';
4661 		*(sp + 1) = '\0';
4662 
4663 		fclose(input);
4664 
4665 		sprintf(path, "cpuidle/state%d/time", state);
4666 
4667 		add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC,
4668 				FORMAT_PERCENT, SYSFS_PERCPU);
4669 	}
4670 
4671 	for (state = 10; state > 0; --state) {
4672 
4673 		sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
4674 			base_cpu, state);
4675 		input = fopen(path, "r");
4676 		if (input == NULL)
4677 			continue;
4678 		fgets(name_buf, sizeof(name_buf), input);
4679 		 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
4680 		sp = strchr(name_buf, '-');
4681 		if (!sp)
4682 			sp = strchrnul(name_buf, '\n');
4683 		*sp = '\0';
4684 		fclose(input);
4685 
4686 		sprintf(path, "cpuidle/state%d/usage", state);
4687 
4688 		add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS,
4689 				FORMAT_DELTA, SYSFS_PERCPU);
4690 	}
4691 
4692 }
4693 
4694 
/*
 * parse a cpu set with syntax such as "1,2,4..6,8-10"
 * and set the corresponding bits in cpu_subset
 */
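/*
 * For example, "--cpu 1,2,4..6,8-10" sets bits 1, 2, 4, 5, 6, 8, 9
 * and 10; both ".." and "-" denote an inclusive range.
 */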
4699 void parse_cpu_command(char *optarg)
4700 {
4701 	unsigned int start, end;
4702 	char *next;
4703 
4704 	cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS);
4705 	if (cpu_subset == NULL)
4706 		err(3, "CPU_ALLOC");
4707 	cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS);
4708 
4709 	CPU_ZERO_S(cpu_subset_size, cpu_subset);
4710 
4711 	next = optarg;
4712 
4713 	while (next && *next) {
4714 
4715 		if (*next == '-')	/* no negative cpu numbers */
4716 			goto error;
4717 
4718 		start = strtoul(next, &next, 10);
4719 
4720 		if (start >= CPU_SUBSET_MAXCPUS)
4721 			goto error;
4722 		CPU_SET_S(start, cpu_subset_size, cpu_subset);
4723 
4724 		if (*next == '\0')
4725 			break;
4726 
4727 		if (*next == ',') {
4728 			next += 1;
4729 			continue;
4730 		}
4731 
4732 		if (*next == '-') {
4733 			next += 1;	/* start range */
4734 		} else if (*next == '.') {
4735 			next += 1;
4736 			if (*next == '.')
4737 				next += 1;	/* start range */
4738 			else
4739 				goto error;
4740 		}
4741 
4742 		end = strtoul(next, &next, 10);
4743 		if (end <= start)
4744 			goto error;
4745 
4746 		while (++start <= end) {
4747 			if (start >= CPU_SUBSET_MAXCPUS)
4748 				goto error;
4749 			CPU_SET_S(start, cpu_subset_size, cpu_subset);
4750 		}
4751 
4752 		if (*next == ',')
4753 			next += 1;
4754 		else if (*next != '\0')
4755 			goto error;
4756 	}
4757 
4758 	return;
4759 
4760 error:
4761 	fprintf(stderr, "'--cpu %s' malformed\n", optarg);
4762 	exit(-1);
4763 }
4764 
4765 /*
4766  * HIDE_LIST - hide this list of counters, show the rest [default]
4767  * SHOW_LIST - show this list of counters, hide the rest
4768  */
4769 enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST;
4770 
4771 int shown;
4772 /*
4773  * parse_show_hide() - process cmdline to set default counter action
4774  */
4775 void parse_show_hide(char *optarg, enum show_hide_mode new_mode)
4776 {
4777 	/*
4778 	 * --show: show only those specified
4779 	 *  The 1st invocation will clear and replace the enabled mask
4780 	 *  subsequent invocations can add to it.
4781 	 */
4782 	if (new_mode == SHOW_LIST) {
4783 		if (shown == 0)
4784 			bic_enabled = bic_lookup(optarg);
4785 		else
4786 			bic_enabled |= bic_lookup(optarg);
4787 		shown = 1;
4788 
4789 		return;
4790 	}
4791 
4792 	/*
4793 	 * --hide: do not show those specified
4794 	 *  multiple invocations simply clear more bits in enabled mask
4795 	 */
4796 	bic_enabled &= ~bic_lookup(optarg);
4797 
4798 }
4799 
4800 void cmdline(int argc, char **argv)
4801 {
4802 	int opt;
4803 	int option_index = 0;
4804 	static struct option long_options[] = {
4805 		{"add",		required_argument,	0, 'a'},
4806 		{"cpu",		required_argument,	0, 'c'},
4807 		{"Dump",	no_argument,		0, 'D'},
4808 		{"debug",	no_argument,		0, 'd'},	/* internal, not documented */
4809 		{"interval",	required_argument,	0, 'i'},
4810 		{"help",	no_argument,		0, 'h'},
4811 		{"hide",	required_argument,	0, 'H'},	// meh, -h taken by --help
4812 		{"Joules",	no_argument,		0, 'J'},
4813 		{"out",		required_argument,	0, 'o'},
4814 		{"Package",	no_argument,		0, 'p'},
4815 		{"processor",	no_argument,		0, 'p'},
4816 		{"quiet",	no_argument,		0, 'q'},
4817 		{"show",	required_argument,	0, 's'},
4818 		{"Summary",	no_argument,		0, 'S'},
4819 		{"TCC",		required_argument,	0, 'T'},
4820 		{"version",	no_argument,		0, 'v' },
4821 		{0,		0,			0,  0 }
4822 	};
4823 
4824 	progname = argv[0];
4825 
4826 	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:PpqST:v",
4827 				long_options, &option_index)) != -1) {
4828 		switch (opt) {
4829 		case 'a':
4830 			parse_add_command(optarg);
4831 			break;
4832 		case 'c':
4833 			parse_cpu_command(optarg);
4834 			break;
4835 		case 'D':
4836 			dump_only++;
4837 			break;
4838 		case 'd':
4839 			debug++;
4840 			break;
4841 		case 'H':
4842 			parse_show_hide(optarg, HIDE_LIST);
4843 			break;
4844 		case 'h':
4845 		default:
4846 			help();
4847 			exit(1);
4848 		case 'i':
4849 			{
4850 				double interval = strtod(optarg, NULL);
4851 
4852 				if (interval < 0.001) {
4853 					fprintf(outf, "interval %f seconds is too small\n",
4854 						interval);
4855 					exit(2);
4856 				}
4857 
				/* split e.g. 2.5 seconds into {tv_sec = 2, tv_nsec = 500000000} */
				interval_ts.tv_sec = interval;
				interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000;
4860 			}
4861 			break;
4862 		case 'J':
4863 			rapl_joules++;
4864 			break;
4865 		case 'o':
4866 			outf = fopen_or_die(optarg, "w");
4867 			break;
4868 		case 'P':
4869 			show_pkg_only++;
4870 			break;
4871 		case 'p':
4872 			show_core_only++;
4873 			break;
4874 		case 'q':
4875 			quiet = 1;
4876 			break;
4877 		case 's':
4878 			parse_show_hide(optarg, SHOW_LIST);
4879 			break;
4880 		case 'S':
4881 			summary_only++;
4882 			break;
4883 		case 'T':
4884 			tcc_activation_temp_override = atoi(optarg);
4885 			break;
4886 		case 'v':
4887 			print_version();
4888 			exit(0);
4889 			break;
4890 		}
4891 	}
4892 }
4893 
4894 int main(int argc, char **argv)
4895 {
4896 	outf = stderr;
4897 
4898 	cmdline(argc, argv);
4899 
4900 	if (!quiet)
4901 		print_version();
4902 
4903 	probe_sysfs();
4904 
4905 	turbostat_init();
4906 
4907 	/* dump counters and exit */
4908 	if (dump_only)
4909 		return get_and_dump_counters();
4910 
4911 	/*
4912 	 * if any params left, it must be a command to fork
4913 	 */
4914 	if (argc - optind)
4915 		return fork_it(argv + optind);
4916 	else
4917 		turbostat_loop();
4918 
4919 	return 0;
4920 }
4921