1 /* 2 * turbostat -- show CPU frequency and C-state residency 3 * on modern Intel turbo-capable processors. 4 * 5 * Copyright (c) 2013 Intel Corporation. 6 * Len Brown <len.brown@intel.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms and conditions of the GNU General Public License, 10 * version 2, as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 * more details. 16 * 17 * You should have received a copy of the GNU General Public License along with 18 * this program; if not, write to the Free Software Foundation, Inc., 19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 */ 21 22 #define _GNU_SOURCE 23 #include MSRHEADER 24 #include <stdarg.h> 25 #include <stdio.h> 26 #include <err.h> 27 #include <unistd.h> 28 #include <sys/types.h> 29 #include <sys/wait.h> 30 #include <sys/stat.h> 31 #include <sys/resource.h> 32 #include <fcntl.h> 33 #include <signal.h> 34 #include <sys/time.h> 35 #include <stdlib.h> 36 #include <getopt.h> 37 #include <dirent.h> 38 #include <string.h> 39 #include <ctype.h> 40 #include <sched.h> 41 #include <time.h> 42 #include <cpuid.h> 43 #include <linux/capability.h> 44 #include <errno.h> 45 46 char *proc_stat = "/proc/stat"; 47 FILE *outf; 48 int *fd_percpu; 49 struct timespec interval_ts = {5, 0}; 50 unsigned int debug; 51 unsigned int rapl_joules; 52 unsigned int summary_only; 53 unsigned int dump_only; 54 unsigned int do_nhm_cstates; 55 unsigned int do_snb_cstates; 56 unsigned int do_knl_cstates; 57 unsigned int do_pc2; 58 unsigned int do_pc3; 59 unsigned int do_pc6; 60 unsigned int do_pc7; 61 unsigned int do_c8_c9_c10; 62 unsigned int do_skl_residency; 63 unsigned int do_slm_cstates; 64 unsigned int use_c1_residency_msr; 65 unsigned int has_aperf; 66 unsigned int has_epb; 67 unsigned int do_irtl_snb; 68 unsigned int do_irtl_hsw; 69 unsigned int units = 1000000; /* MHz etc */ 70 unsigned int genuine_intel; 71 unsigned int has_invariant_tsc; 72 unsigned int do_nhm_platform_info; 73 unsigned int extra_msr_offset32; 74 unsigned int extra_msr_offset64; 75 unsigned int extra_delta_offset32; 76 unsigned int extra_delta_offset64; 77 unsigned int aperf_mperf_multiplier = 1; 78 int do_irq = 1; 79 int do_smi; 80 double bclk; 81 double base_hz; 82 unsigned int has_base_hz; 83 double tsc_tweak = 1.0; 84 unsigned int show_pkg; 85 unsigned int show_core; 86 unsigned int show_cpu; 87 unsigned int show_pkg_only; 88 unsigned int show_core_only; 89 char *output_buffer, *outp; 90 unsigned int do_rapl; 91 unsigned int do_dts; 92 unsigned int do_ptm; 93 unsigned int do_gfx_rc6_ms; 94 unsigned long long gfx_cur_rc6_ms; 95 unsigned int do_gfx_mhz; 96 unsigned int gfx_cur_mhz; 97 unsigned int tcc_activation_temp; 98 unsigned int tcc_activation_temp_override; 99 double rapl_power_units, rapl_time_units; 100 double rapl_dram_energy_units, rapl_energy_units; 101 double rapl_joule_counter_range; 102 unsigned int do_core_perf_limit_reasons; 103 unsigned int do_gfx_perf_limit_reasons; 104 unsigned int do_ring_perf_limit_reasons; 105 unsigned int crystal_hz; 106 unsigned long long tsc_hz; 107 int base_cpu; 108 double discover_bclk(unsigned int family, unsigned int model); 109 unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ 110 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ 111 unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ 112 unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ 113 unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ 114 unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 115 116 #define RAPL_PKG (1 << 0) 117 /* 0x610 MSR_PKG_POWER_LIMIT */ 118 /* 0x611 MSR_PKG_ENERGY_STATUS */ 119 #define RAPL_PKG_PERF_STATUS (1 << 1) 120 /* 0x613 MSR_PKG_PERF_STATUS */ 121 #define RAPL_PKG_POWER_INFO (1 << 2) 122 /* 0x614 MSR_PKG_POWER_INFO */ 123 124 #define RAPL_DRAM (1 << 3) 125 /* 0x618 MSR_DRAM_POWER_LIMIT */ 126 /* 0x619 MSR_DRAM_ENERGY_STATUS */ 127 #define RAPL_DRAM_PERF_STATUS (1 << 4) 128 /* 0x61b MSR_DRAM_PERF_STATUS */ 129 #define RAPL_DRAM_POWER_INFO (1 << 5) 130 /* 0x61c MSR_DRAM_POWER_INFO */ 131 132 #define RAPL_CORES (1 << 6) 133 /* 0x638 MSR_PP0_POWER_LIMIT */ 134 /* 0x639 MSR_PP0_ENERGY_STATUS */ 135 #define RAPL_CORE_POLICY (1 << 7) 136 /* 0x63a MSR_PP0_POLICY */ 137 138 #define RAPL_GFX (1 << 8) 139 /* 0x640 MSR_PP1_POWER_LIMIT */ 140 /* 0x641 MSR_PP1_ENERGY_STATUS */ 141 /* 0x642 MSR_PP1_POLICY */ 142 #define TJMAX_DEFAULT 100 143 144 #define MAX(a, b) ((a) > (b) ? (a) : (b)) 145 146 int backwards_count; 147 char *progname; 148 149 cpu_set_t *cpu_present_set, *cpu_affinity_set; 150 size_t cpu_present_setsize, cpu_affinity_setsize; 151 152 struct thread_data { 153 unsigned long long tsc; 154 unsigned long long aperf; 155 unsigned long long mperf; 156 unsigned long long c1; 157 unsigned long long extra_msr64; 158 unsigned long long extra_delta64; 159 unsigned long long extra_msr32; 160 unsigned long long extra_delta32; 161 unsigned int irq_count; 162 unsigned int smi_count; 163 unsigned int cpu_id; 164 unsigned int flags; 165 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 166 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 167 } *thread_even, *thread_odd; 168 169 struct core_data { 170 unsigned long long c3; 171 unsigned long long c6; 172 unsigned long long c7; 173 unsigned int core_temp_c; 174 unsigned int core_id; 175 } *core_even, *core_odd; 176 177 struct pkg_data { 178 unsigned long long pc2; 179 unsigned long long pc3; 180 unsigned long long pc6; 181 unsigned long long pc7; 182 unsigned long long pc8; 183 unsigned long long pc9; 184 unsigned long long pc10; 185 unsigned long long pkg_wtd_core_c0; 186 unsigned long long pkg_any_core_c0; 187 unsigned long long pkg_any_gfxe_c0; 188 unsigned long long pkg_both_core_gfxe_c0; 189 long long gfx_rc6_ms; 190 unsigned int gfx_mhz; 191 unsigned int package_id; 192 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ 193 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ 194 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ 195 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ 196 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ 197 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ 198 unsigned int pkg_temp_c; 199 200 } *package_even, *package_odd; 201 202 #define ODD_COUNTERS thread_odd, core_odd, package_odd 203 #define EVEN_COUNTERS thread_even, core_even, package_even 204 205 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \ 206 (thread_base + (pkg_no) * topo.num_cores_per_pkg * \ 207 topo.num_threads_per_core + \ 208 (core_no) * topo.num_threads_per_core + (thread_no)) 209 #define GET_CORE(core_base, core_no, pkg_no) \ 210 (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no)) 211 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) 212 213 struct system_summary { 214 struct thread_data threads; 215 struct core_data cores; 216 struct pkg_data packages; 217 } sum, average; 218 219 220 struct topo_params { 221 int num_packages; 222 int num_cpus; 223 int num_cores; 224 int max_cpu_num; 225 int num_cores_per_pkg; 226 int num_threads_per_core; 227 } topo; 228 229 struct timeval tv_even, tv_odd, tv_delta; 230 231 int *irq_column_2_cpu; /* /proc/interrupts column numbers */ 232 int *irqs_per_cpu; /* indexed by cpu_num */ 233 234 void setup_all_buffers(void); 235 236 int cpu_is_not_present(int cpu) 237 { 238 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); 239 } 240 /* 241 * run func(thread, core, package) in topology order 242 * skip non-present cpus 243 */ 244 245 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), 246 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) 247 { 248 int retval, pkg_no, core_no, thread_no; 249 250 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 251 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { 252 for (thread_no = 0; thread_no < 253 topo.num_threads_per_core; ++thread_no) { 254 struct thread_data *t; 255 struct core_data *c; 256 struct pkg_data *p; 257 258 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); 259 260 if (cpu_is_not_present(t->cpu_id)) 261 continue; 262 263 c = GET_CORE(core_base, core_no, pkg_no); 264 p = GET_PKG(pkg_base, pkg_no); 265 266 retval = func(t, c, p); 267 if (retval) 268 return retval; 269 } 270 } 271 } 272 return 0; 273 } 274 275 int cpu_migrate(int cpu) 276 { 277 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 278 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); 279 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) 280 return -1; 281 else 282 return 0; 283 } 284 int get_msr_fd(int cpu) 285 { 286 char pathname[32]; 287 int fd; 288 289 fd = fd_percpu[cpu]; 290 291 if (fd) 292 return fd; 293 294 sprintf(pathname, "/dev/cpu/%d/msr", cpu); 295 fd = open(pathname, O_RDONLY); 296 if (fd < 0) 297 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname); 298 299 fd_percpu[cpu] = fd; 300 301 return fd; 302 } 303 304 int get_msr(int cpu, off_t offset, unsigned long long *msr) 305 { 306 ssize_t retval; 307 308 retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset); 309 310 if (retval != sizeof *msr) 311 err(-1, "msr %d offset 0x%llx read failed", cpu, (unsigned long long)offset); 312 313 return 0; 314 } 315 316 /* 317 * Example Format w/ field column widths: 318 * 319 * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz IRQ SMI Busy% CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp PkgTmp GFXMHz Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt 320 * 12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 321 */ 322 323 void print_header(void) 324 { 325 if (show_pkg) 326 outp += sprintf(outp, "\tPackage"); 327 if (show_core) 328 outp += sprintf(outp, "\tCore"); 329 if (show_cpu) 330 outp += sprintf(outp, "\tCPU"); 331 if (has_aperf) 332 outp += sprintf(outp, "\tAvg_MHz"); 333 if (has_aperf) 334 outp += sprintf(outp, "\tBusy%%"); 335 if (has_aperf) 336 outp += sprintf(outp, "\tBzy_MHz"); 337 outp += sprintf(outp, "\tTSC_MHz"); 338 339 if (extra_delta_offset32) 340 outp += sprintf(outp, "\tcount 0x%03X", extra_delta_offset32); 341 if (extra_delta_offset64) 342 outp += sprintf(outp, "\tCOUNT 0x%03X", extra_delta_offset64); 343 if (extra_msr_offset32) 344 outp += sprintf(outp, "\tMSR 0x%03X", extra_msr_offset32); 345 if (extra_msr_offset64) 346 outp += sprintf(outp, "\tMSR 0x%03X", extra_msr_offset64); 347 348 if (!debug) 349 goto done; 350 351 if (do_irq) 352 outp += sprintf(outp, "\tIRQ"); 353 if (do_smi) 354 outp += sprintf(outp, "\tSMI"); 355 356 if (do_nhm_cstates) 357 outp += sprintf(outp, "\tCPU%%c1"); 358 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) 359 outp += sprintf(outp, "\tCPU%%c3"); 360 if (do_nhm_cstates) 361 outp += sprintf(outp, "\tCPU%%c6"); 362 if (do_snb_cstates) 363 outp += sprintf(outp, "\tCPU%%c7"); 364 365 if (do_dts) 366 outp += sprintf(outp, "\tCoreTmp"); 367 if (do_ptm) 368 outp += sprintf(outp, "\tPkgTmp"); 369 370 if (do_gfx_rc6_ms) 371 outp += sprintf(outp, "\tGFX%%rc6"); 372 373 if (do_gfx_mhz) 374 outp += sprintf(outp, "\tGFXMHz"); 375 376 if (do_skl_residency) { 377 outp += sprintf(outp, "\tTotl%%C0"); 378 outp += sprintf(outp, "\tAny%%C0"); 379 outp += sprintf(outp, "\tGFX%%C0"); 380 outp += sprintf(outp, "\tCPUGFX%%"); 381 } 382 383 if (do_pc2) 384 outp += sprintf(outp, "\tPkg%%pc2"); 385 if (do_pc3) 386 outp += sprintf(outp, "\tPkg%%pc3"); 387 if (do_pc6) 388 outp += sprintf(outp, "\tPkg%%pc6"); 389 if (do_pc7) 390 outp += sprintf(outp, "\tPkg%%pc7"); 391 if (do_c8_c9_c10) { 392 outp += sprintf(outp, "\tPkg%%pc8"); 393 outp += sprintf(outp, "\tPkg%%pc9"); 394 outp += sprintf(outp, "\tPk%%pc10"); 395 } 396 397 if (do_rapl && !rapl_joules) { 398 if (do_rapl & RAPL_PKG) 399 outp += sprintf(outp, "\tPkgWatt"); 400 if (do_rapl & RAPL_CORES) 401 outp += sprintf(outp, "\tCorWatt"); 402 if (do_rapl & RAPL_GFX) 403 outp += sprintf(outp, "\tGFXWatt"); 404 if (do_rapl & RAPL_DRAM) 405 outp += sprintf(outp, "\tRAMWatt"); 406 if (do_rapl & RAPL_PKG_PERF_STATUS) 407 outp += sprintf(outp, "\tPKG_%%"); 408 if (do_rapl & RAPL_DRAM_PERF_STATUS) 409 outp += sprintf(outp, "\tRAM_%%"); 410 } else if (do_rapl && rapl_joules) { 411 if (do_rapl & RAPL_PKG) 412 outp += sprintf(outp, "\tPkg_J"); 413 if (do_rapl & RAPL_CORES) 414 outp += sprintf(outp, "\tCor_J"); 415 if (do_rapl & RAPL_GFX) 416 outp += sprintf(outp, "\tGFX_J"); 417 if (do_rapl & RAPL_DRAM) 418 outp += sprintf(outp, "\tRAM_J"); 419 if (do_rapl & RAPL_PKG_PERF_STATUS) 420 outp += sprintf(outp, "\tPKG_%%"); 421 if (do_rapl & RAPL_DRAM_PERF_STATUS) 422 outp += sprintf(outp, "\tRAM_%%"); 423 } 424 done: 425 outp += sprintf(outp, "\n"); 426 } 427 428 int dump_counters(struct thread_data *t, struct core_data *c, 429 struct pkg_data *p) 430 { 431 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); 432 433 if (t) { 434 outp += sprintf(outp, "CPU: %d flags 0x%x\n", 435 t->cpu_id, t->flags); 436 outp += sprintf(outp, "TSC: %016llX\n", t->tsc); 437 outp += sprintf(outp, "aperf: %016llX\n", t->aperf); 438 outp += sprintf(outp, "mperf: %016llX\n", t->mperf); 439 outp += sprintf(outp, "c1: %016llX\n", t->c1); 440 outp += sprintf(outp, "msr0x%x: %08llX\n", 441 extra_delta_offset32, t->extra_delta32); 442 outp += sprintf(outp, "msr0x%x: %016llX\n", 443 extra_delta_offset64, t->extra_delta64); 444 outp += sprintf(outp, "msr0x%x: %08llX\n", 445 extra_msr_offset32, t->extra_msr32); 446 outp += sprintf(outp, "msr0x%x: %016llX\n", 447 extra_msr_offset64, t->extra_msr64); 448 if (do_irq) 449 outp += sprintf(outp, "IRQ: %08X\n", t->irq_count); 450 if (do_smi) 451 outp += sprintf(outp, "SMI: %08X\n", t->smi_count); 452 } 453 454 if (c) { 455 outp += sprintf(outp, "core: %d\n", c->core_id); 456 outp += sprintf(outp, "c3: %016llX\n", c->c3); 457 outp += sprintf(outp, "c6: %016llX\n", c->c6); 458 outp += sprintf(outp, "c7: %016llX\n", c->c7); 459 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); 460 } 461 462 if (p) { 463 outp += sprintf(outp, "package: %d\n", p->package_id); 464 465 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0); 466 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0); 467 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0); 468 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); 469 470 outp += sprintf(outp, "pc2: %016llX\n", p->pc2); 471 if (do_pc3) 472 outp += sprintf(outp, "pc3: %016llX\n", p->pc3); 473 if (do_pc6) 474 outp += sprintf(outp, "pc6: %016llX\n", p->pc6); 475 if (do_pc7) 476 outp += sprintf(outp, "pc7: %016llX\n", p->pc7); 477 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 478 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 479 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 480 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 481 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores); 482 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx); 483 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram); 484 outp += sprintf(outp, "Throttle PKG: %0X\n", 485 p->rapl_pkg_perf_status); 486 outp += sprintf(outp, "Throttle RAM: %0X\n", 487 p->rapl_dram_perf_status); 488 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); 489 } 490 491 outp += sprintf(outp, "\n"); 492 493 return 0; 494 } 495 496 /* 497 * column formatting convention & formats 498 */ 499 int format_counters(struct thread_data *t, struct core_data *c, 500 struct pkg_data *p) 501 { 502 double interval_float; 503 char *fmt8; 504 505 /* if showing only 1st thread in core and this isn't one, bail out */ 506 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 507 return 0; 508 509 /* if showing only 1st thread in pkg and this isn't one, bail out */ 510 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 511 return 0; 512 513 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 514 515 /* topo columns, print blanks on 1st (average) line */ 516 if (t == &average.threads) { 517 if (show_pkg) 518 outp += sprintf(outp, "\t-"); 519 if (show_core) 520 outp += sprintf(outp, "\t-"); 521 if (show_cpu) 522 outp += sprintf(outp, "\t-"); 523 } else { 524 if (show_pkg) { 525 if (p) 526 outp += sprintf(outp, "\t%d", p->package_id); 527 else 528 outp += sprintf(outp, "\t-"); 529 } 530 if (show_core) { 531 if (c) 532 outp += sprintf(outp, "\t%d", c->core_id); 533 else 534 outp += sprintf(outp, "\t-"); 535 } 536 if (show_cpu) 537 outp += sprintf(outp, "\t%d", t->cpu_id); 538 } 539 540 /* Avg_MHz */ 541 if (has_aperf) 542 outp += sprintf(outp, "\t%.0f", 543 1.0 / units * t->aperf / interval_float); 544 545 /* Busy% */ 546 if (has_aperf) 547 outp += sprintf(outp, "\t%.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); 548 549 /* Bzy_MHz */ 550 if (has_aperf) { 551 if (has_base_hz) 552 outp += sprintf(outp, "\t%.0f", base_hz / units * t->aperf / t->mperf); 553 else 554 outp += sprintf(outp, "\t%.0f", 555 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); 556 } 557 558 /* TSC_MHz */ 559 outp += sprintf(outp, "\t%.0f", 1.0 * t->tsc/units/interval_float); 560 561 /* delta */ 562 if (extra_delta_offset32) 563 outp += sprintf(outp, "\t%11llu", t->extra_delta32); 564 565 /* DELTA */ 566 if (extra_delta_offset64) 567 outp += sprintf(outp, "\t%11llu", t->extra_delta64); 568 /* msr */ 569 if (extra_msr_offset32) 570 outp += sprintf(outp, "\t0x%08llx", t->extra_msr32); 571 572 /* MSR */ 573 if (extra_msr_offset64) 574 outp += sprintf(outp, "\t0x%016llx", t->extra_msr64); 575 576 if (!debug) 577 goto done; 578 579 /* IRQ */ 580 if (do_irq) 581 outp += sprintf(outp, "\t%d", t->irq_count); 582 583 /* SMI */ 584 if (do_smi) 585 outp += sprintf(outp, "\t%d", t->smi_count); 586 587 if (do_nhm_cstates) 588 outp += sprintf(outp, "\t%.2f", 100.0 * t->c1/t->tsc); 589 590 /* print per-core data only for 1st thread in core */ 591 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 592 goto done; 593 594 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) 595 outp += sprintf(outp, "\t%.2f", 100.0 * c->c3/t->tsc); 596 if (do_nhm_cstates) 597 outp += sprintf(outp, "\t%.2f", 100.0 * c->c6/t->tsc); 598 if (do_snb_cstates) 599 outp += sprintf(outp, "\t%.2f", 100.0 * c->c7/t->tsc); 600 601 if (do_dts) 602 outp += sprintf(outp, "\t%d", c->core_temp_c); 603 604 /* print per-package data only for 1st core in package */ 605 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 606 goto done; 607 608 /* PkgTmp */ 609 if (do_ptm) 610 outp += sprintf(outp, "\t%d", p->pkg_temp_c); 611 612 /* GFXrc6 */ 613 if (do_gfx_rc6_ms) { 614 if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */ 615 outp += sprintf(outp, "\t**.**"); 616 } else { 617 outp += sprintf(outp, "\t%.2f", 618 p->gfx_rc6_ms / 10.0 / interval_float); 619 } 620 } 621 622 /* GFXMHz */ 623 if (do_gfx_mhz) 624 outp += sprintf(outp, "\t%d", p->gfx_mhz); 625 626 /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ 627 if (do_skl_residency) { 628 outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc); 629 outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_core_c0/t->tsc); 630 outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc); 631 outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc); 632 } 633 634 if (do_pc2) 635 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc2/t->tsc); 636 if (do_pc3) 637 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc3/t->tsc); 638 if (do_pc6) 639 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc6/t->tsc); 640 if (do_pc7) 641 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc7/t->tsc); 642 if (do_c8_c9_c10) { 643 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc8/t->tsc); 644 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc9/t->tsc); 645 outp += sprintf(outp, "\t%.2f", 100.0 * p->pc10/t->tsc); 646 } 647 648 /* 649 * If measurement interval exceeds minimum RAPL Joule Counter range, 650 * indicate that results are suspect by printing "**" in fraction place. 651 */ 652 if (interval_float < rapl_joule_counter_range) 653 fmt8 = "\t%.2f"; 654 else 655 fmt8 = "%6.0f**"; 656 657 if (do_rapl && !rapl_joules) { 658 if (do_rapl & RAPL_PKG) 659 outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float); 660 if (do_rapl & RAPL_CORES) 661 outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float); 662 if (do_rapl & RAPL_GFX) 663 outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float); 664 if (do_rapl & RAPL_DRAM) 665 outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float); 666 if (do_rapl & RAPL_PKG_PERF_STATUS) 667 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 668 if (do_rapl & RAPL_DRAM_PERF_STATUS) 669 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 670 } else if (do_rapl && rapl_joules) { 671 if (do_rapl & RAPL_PKG) 672 outp += sprintf(outp, fmt8, 673 p->energy_pkg * rapl_energy_units); 674 if (do_rapl & RAPL_CORES) 675 outp += sprintf(outp, fmt8, 676 p->energy_cores * rapl_energy_units); 677 if (do_rapl & RAPL_GFX) 678 outp += sprintf(outp, fmt8, 679 p->energy_gfx * rapl_energy_units); 680 if (do_rapl & RAPL_DRAM) 681 outp += sprintf(outp, fmt8, 682 p->energy_dram * rapl_dram_energy_units); 683 if (do_rapl & RAPL_PKG_PERF_STATUS) 684 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 685 if (do_rapl & RAPL_DRAM_PERF_STATUS) 686 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 687 } 688 done: 689 outp += sprintf(outp, "\n"); 690 691 return 0; 692 } 693 694 void flush_output_stdout(void) 695 { 696 FILE *filep; 697 698 if (outf == stderr) 699 filep = stdout; 700 else 701 filep = outf; 702 703 fputs(output_buffer, filep); 704 fflush(filep); 705 706 outp = output_buffer; 707 } 708 void flush_output_stderr(void) 709 { 710 fputs(output_buffer, outf); 711 fflush(outf); 712 outp = output_buffer; 713 } 714 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 715 { 716 static int printed; 717 718 if (!printed || !summary_only) 719 print_header(); 720 721 if (topo.num_cpus > 1) 722 format_counters(&average.threads, &average.cores, 723 &average.packages); 724 725 printed = 1; 726 727 if (summary_only) 728 return; 729 730 for_all_cpus(format_counters, t, c, p); 731 } 732 733 #define DELTA_WRAP32(new, old) \ 734 if (new > old) { \ 735 old = new - old; \ 736 } else { \ 737 old = 0x100000000 + new - old; \ 738 } 739 740 int 741 delta_package(struct pkg_data *new, struct pkg_data *old) 742 { 743 744 if (do_skl_residency) { 745 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0; 746 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0; 747 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0; 748 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; 749 } 750 old->pc2 = new->pc2 - old->pc2; 751 if (do_pc3) 752 old->pc3 = new->pc3 - old->pc3; 753 if (do_pc6) 754 old->pc6 = new->pc6 - old->pc6; 755 if (do_pc7) 756 old->pc7 = new->pc7 - old->pc7; 757 old->pc8 = new->pc8 - old->pc8; 758 old->pc9 = new->pc9 - old->pc9; 759 old->pc10 = new->pc10 - old->pc10; 760 old->pkg_temp_c = new->pkg_temp_c; 761 762 /* flag an error when rc6 counter resets/wraps */ 763 if (old->gfx_rc6_ms > new->gfx_rc6_ms) 764 old->gfx_rc6_ms = -1; 765 else 766 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; 767 768 old->gfx_mhz = new->gfx_mhz; 769 770 DELTA_WRAP32(new->energy_pkg, old->energy_pkg); 771 DELTA_WRAP32(new->energy_cores, old->energy_cores); 772 DELTA_WRAP32(new->energy_gfx, old->energy_gfx); 773 DELTA_WRAP32(new->energy_dram, old->energy_dram); 774 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status); 775 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status); 776 777 return 0; 778 } 779 780 void 781 delta_core(struct core_data *new, struct core_data *old) 782 { 783 old->c3 = new->c3 - old->c3; 784 old->c6 = new->c6 - old->c6; 785 old->c7 = new->c7 - old->c7; 786 old->core_temp_c = new->core_temp_c; 787 } 788 789 /* 790 * old = new - old 791 */ 792 int 793 delta_thread(struct thread_data *new, struct thread_data *old, 794 struct core_data *core_delta) 795 { 796 old->tsc = new->tsc - old->tsc; 797 798 /* check for TSC < 1 Mcycles over interval */ 799 if (old->tsc < (1000 * 1000)) 800 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n" 801 "You can disable all c-states by booting with \"idle=poll\"\n" 802 "or just the deep ones with \"processor.max_cstate=1\""); 803 804 old->c1 = new->c1 - old->c1; 805 806 if (has_aperf) { 807 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 808 old->aperf = new->aperf - old->aperf; 809 old->mperf = new->mperf - old->mperf; 810 } else { 811 return -1; 812 } 813 } 814 815 816 if (use_c1_residency_msr) { 817 /* 818 * Some models have a dedicated C1 residency MSR, 819 * which should be more accurate than the derivation below. 820 */ 821 } else { 822 /* 823 * As counter collection is not atomic, 824 * it is possible for mperf's non-halted cycles + idle states 825 * to exceed TSC's all cycles: show c1 = 0% in that case. 826 */ 827 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) 828 old->c1 = 0; 829 else { 830 /* normal case, derive c1 */ 831 old->c1 = old->tsc - old->mperf - core_delta->c3 832 - core_delta->c6 - core_delta->c7; 833 } 834 } 835 836 if (old->mperf == 0) { 837 if (debug > 1) 838 fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id); 839 old->mperf = 1; /* divide by 0 protection */ 840 } 841 842 old->extra_delta32 = new->extra_delta32 - old->extra_delta32; 843 old->extra_delta32 &= 0xFFFFFFFF; 844 845 old->extra_delta64 = new->extra_delta64 - old->extra_delta64; 846 847 /* 848 * Extra MSR is just a snapshot, simply copy latest w/o subtracting 849 */ 850 old->extra_msr32 = new->extra_msr32; 851 old->extra_msr64 = new->extra_msr64; 852 853 if (do_irq) 854 old->irq_count = new->irq_count - old->irq_count; 855 856 if (do_smi) 857 old->smi_count = new->smi_count - old->smi_count; 858 859 return 0; 860 } 861 862 int delta_cpu(struct thread_data *t, struct core_data *c, 863 struct pkg_data *p, struct thread_data *t2, 864 struct core_data *c2, struct pkg_data *p2) 865 { 866 int retval = 0; 867 868 /* calculate core delta only for 1st thread in core */ 869 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) 870 delta_core(c, c2); 871 872 /* always calculate thread delta */ 873 retval = delta_thread(t, t2, c2); /* c2 is core delta */ 874 if (retval) 875 return retval; 876 877 /* calculate package delta only for 1st core in package */ 878 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) 879 retval = delta_package(p, p2); 880 881 return retval; 882 } 883 884 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 885 { 886 t->tsc = 0; 887 t->aperf = 0; 888 t->mperf = 0; 889 t->c1 = 0; 890 891 t->extra_delta32 = 0; 892 t->extra_delta64 = 0; 893 894 t->irq_count = 0; 895 t->smi_count = 0; 896 897 /* tells format_counters to dump all fields from this set */ 898 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE; 899 900 c->c3 = 0; 901 c->c6 = 0; 902 c->c7 = 0; 903 c->core_temp_c = 0; 904 905 p->pkg_wtd_core_c0 = 0; 906 p->pkg_any_core_c0 = 0; 907 p->pkg_any_gfxe_c0 = 0; 908 p->pkg_both_core_gfxe_c0 = 0; 909 910 p->pc2 = 0; 911 if (do_pc3) 912 p->pc3 = 0; 913 if (do_pc6) 914 p->pc6 = 0; 915 if (do_pc7) 916 p->pc7 = 0; 917 p->pc8 = 0; 918 p->pc9 = 0; 919 p->pc10 = 0; 920 921 p->energy_pkg = 0; 922 p->energy_dram = 0; 923 p->energy_cores = 0; 924 p->energy_gfx = 0; 925 p->rapl_pkg_perf_status = 0; 926 p->rapl_dram_perf_status = 0; 927 p->pkg_temp_c = 0; 928 929 p->gfx_rc6_ms = 0; 930 p->gfx_mhz = 0; 931 } 932 int sum_counters(struct thread_data *t, struct core_data *c, 933 struct pkg_data *p) 934 { 935 average.threads.tsc += t->tsc; 936 average.threads.aperf += t->aperf; 937 average.threads.mperf += t->mperf; 938 average.threads.c1 += t->c1; 939 940 average.threads.extra_delta32 += t->extra_delta32; 941 average.threads.extra_delta64 += t->extra_delta64; 942 943 average.threads.irq_count += t->irq_count; 944 average.threads.smi_count += t->smi_count; 945 946 /* sum per-core values only for 1st thread in core */ 947 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 948 return 0; 949 950 average.cores.c3 += c->c3; 951 average.cores.c6 += c->c6; 952 average.cores.c7 += c->c7; 953 954 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); 955 956 /* sum per-pkg values only for 1st core in pkg */ 957 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 958 return 0; 959 960 if (do_skl_residency) { 961 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0; 962 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0; 963 average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0; 964 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0; 965 } 966 967 average.packages.pc2 += p->pc2; 968 if (do_pc3) 969 average.packages.pc3 += p->pc3; 970 if (do_pc6) 971 average.packages.pc6 += p->pc6; 972 if (do_pc7) 973 average.packages.pc7 += p->pc7; 974 average.packages.pc8 += p->pc8; 975 average.packages.pc9 += p->pc9; 976 average.packages.pc10 += p->pc10; 977 978 average.packages.energy_pkg += p->energy_pkg; 979 average.packages.energy_dram += p->energy_dram; 980 average.packages.energy_cores += p->energy_cores; 981 average.packages.energy_gfx += p->energy_gfx; 982 983 average.packages.gfx_rc6_ms = p->gfx_rc6_ms; 984 average.packages.gfx_mhz = p->gfx_mhz; 985 986 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c); 987 988 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status; 989 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; 990 return 0; 991 } 992 /* 993 * sum the counters for all cpus in the system 994 * compute the weighted average 995 */ 996 void compute_average(struct thread_data *t, struct core_data *c, 997 struct pkg_data *p) 998 { 999 clear_counters(&average.threads, &average.cores, &average.packages); 1000 1001 for_all_cpus(sum_counters, t, c, p); 1002 1003 average.threads.tsc /= topo.num_cpus; 1004 average.threads.aperf /= topo.num_cpus; 1005 average.threads.mperf /= topo.num_cpus; 1006 average.threads.c1 /= topo.num_cpus; 1007 1008 average.threads.extra_delta32 /= topo.num_cpus; 1009 average.threads.extra_delta32 &= 0xFFFFFFFF; 1010 1011 average.threads.extra_delta64 /= topo.num_cpus; 1012 1013 average.cores.c3 /= topo.num_cores; 1014 average.cores.c6 /= topo.num_cores; 1015 average.cores.c7 /= topo.num_cores; 1016 1017 if (do_skl_residency) { 1018 average.packages.pkg_wtd_core_c0 /= topo.num_packages; 1019 average.packages.pkg_any_core_c0 /= topo.num_packages; 1020 average.packages.pkg_any_gfxe_c0 /= topo.num_packages; 1021 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages; 1022 } 1023 1024 average.packages.pc2 /= topo.num_packages; 1025 if (do_pc3) 1026 average.packages.pc3 /= topo.num_packages; 1027 if (do_pc6) 1028 average.packages.pc6 /= topo.num_packages; 1029 if (do_pc7) 1030 average.packages.pc7 /= topo.num_packages; 1031 1032 average.packages.pc8 /= topo.num_packages; 1033 average.packages.pc9 /= topo.num_packages; 1034 average.packages.pc10 /= topo.num_packages; 1035 } 1036 1037 static unsigned long long rdtsc(void) 1038 { 1039 unsigned int low, high; 1040 1041 asm volatile("rdtsc" : "=a" (low), "=d" (high)); 1042 1043 return low | ((unsigned long long)high) << 32; 1044 } 1045 1046 /* 1047 * get_counters(...) 1048 * migrate to cpu 1049 * acquire and record local counters for that cpu 1050 */ 1051 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 1052 { 1053 int cpu = t->cpu_id; 1054 unsigned long long msr; 1055 int aperf_mperf_retry_count = 0; 1056 1057 if (cpu_migrate(cpu)) { 1058 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1059 return -1; 1060 } 1061 1062 retry: 1063 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1064 1065 if (has_aperf) { 1066 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1067 1068 /* 1069 * The TSC, APERF and MPERF must be read together for 1070 * APERF/MPERF and MPERF/TSC to give accurate results. 1071 * 1072 * Unfortunately, APERF and MPERF are read by 1073 * individual system call, so delays may occur 1074 * between them. If the time to read them 1075 * varies by a large amount, we re-read them. 1076 */ 1077 1078 /* 1079 * This initial dummy APERF read has been seen to 1080 * reduce jitter in the subsequent reads. 1081 */ 1082 1083 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) 1084 return -3; 1085 1086 t->tsc = rdtsc(); /* re-read close to APERF */ 1087 1088 tsc_before = t->tsc; 1089 1090 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) 1091 return -3; 1092 1093 tsc_between = rdtsc(); 1094 1095 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) 1096 return -4; 1097 1098 tsc_after = rdtsc(); 1099 1100 aperf_time = tsc_between - tsc_before; 1101 mperf_time = tsc_after - tsc_between; 1102 1103 /* 1104 * If the system call latency to read APERF and MPERF 1105 * differ by more than 2x, then try again. 1106 */ 1107 if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) { 1108 aperf_mperf_retry_count++; 1109 if (aperf_mperf_retry_count < 5) 1110 goto retry; 1111 else 1112 warnx("cpu%d jitter %lld %lld", 1113 cpu, aperf_time, mperf_time); 1114 } 1115 aperf_mperf_retry_count = 0; 1116 1117 t->aperf = t->aperf * aperf_mperf_multiplier; 1118 t->mperf = t->mperf * aperf_mperf_multiplier; 1119 } 1120 1121 if (do_irq) 1122 t->irq_count = irqs_per_cpu[cpu]; 1123 if (do_smi) { 1124 if (get_msr(cpu, MSR_SMI_COUNT, &msr)) 1125 return -5; 1126 t->smi_count = msr & 0xFFFFFFFF; 1127 } 1128 if (extra_delta_offset32) { 1129 if (get_msr(cpu, extra_delta_offset32, &msr)) 1130 return -5; 1131 t->extra_delta32 = msr & 0xFFFFFFFF; 1132 } 1133 1134 if (extra_delta_offset64) 1135 if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64)) 1136 return -5; 1137 1138 if (extra_msr_offset32) { 1139 if (get_msr(cpu, extra_msr_offset32, &msr)) 1140 return -5; 1141 t->extra_msr32 = msr & 0xFFFFFFFF; 1142 } 1143 1144 if (extra_msr_offset64) 1145 if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64)) 1146 return -5; 1147 1148 if (use_c1_residency_msr) { 1149 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) 1150 return -6; 1151 } 1152 1153 /* collect core counters only for 1st thread in core */ 1154 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1155 return 0; 1156 1157 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { 1158 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1159 return -6; 1160 } 1161 1162 if (do_nhm_cstates && !do_knl_cstates) { 1163 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1164 return -7; 1165 } else if (do_knl_cstates) { 1166 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1167 return -7; 1168 } 1169 1170 if (do_snb_cstates) 1171 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1172 return -8; 1173 1174 if (do_dts) { 1175 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 1176 return -9; 1177 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1178 } 1179 1180 1181 /* collect package counters only for 1st core in package */ 1182 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 1183 return 0; 1184 1185 if (do_skl_residency) { 1186 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0)) 1187 return -10; 1188 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0)) 1189 return -11; 1190 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0)) 1191 return -12; 1192 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) 1193 return -13; 1194 } 1195 if (do_pc3) 1196 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) 1197 return -9; 1198 if (do_pc6) 1199 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) 1200 return -10; 1201 if (do_pc2) 1202 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) 1203 return -11; 1204 if (do_pc7) 1205 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) 1206 return -12; 1207 if (do_c8_c9_c10) { 1208 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) 1209 return -13; 1210 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) 1211 return -13; 1212 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) 1213 return -13; 1214 } 1215 if (do_rapl & RAPL_PKG) { 1216 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) 1217 return -13; 1218 p->energy_pkg = msr & 0xFFFFFFFF; 1219 } 1220 if (do_rapl & RAPL_CORES) { 1221 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr)) 1222 return -14; 1223 p->energy_cores = msr & 0xFFFFFFFF; 1224 } 1225 if (do_rapl & RAPL_DRAM) { 1226 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) 1227 return -15; 1228 p->energy_dram = msr & 0xFFFFFFFF; 1229 } 1230 if (do_rapl & RAPL_GFX) { 1231 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr)) 1232 return -16; 1233 p->energy_gfx = msr & 0xFFFFFFFF; 1234 } 1235 if (do_rapl & RAPL_PKG_PERF_STATUS) { 1236 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr)) 1237 return -16; 1238 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF; 1239 } 1240 if (do_rapl & RAPL_DRAM_PERF_STATUS) { 1241 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr)) 1242 return -16; 1243 p->rapl_dram_perf_status = msr & 0xFFFFFFFF; 1244 } 1245 if (do_ptm) { 1246 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 1247 return -17; 1248 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1249 } 1250 1251 if (do_gfx_rc6_ms) 1252 p->gfx_rc6_ms = gfx_cur_rc6_ms; 1253 1254 if (do_gfx_mhz) 1255 p->gfx_mhz = gfx_cur_mhz; 1256 1257 return 0; 1258 } 1259 1260 /* 1261 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit: 1262 * If you change the values, note they are used both in comparisons 1263 * (>= PCL__7) and to index pkg_cstate_limit_strings[]. 1264 */ 1265 1266 #define PCLUKN 0 /* Unknown */ 1267 #define PCLRSV 1 /* Reserved */ 1268 #define PCL__0 2 /* PC0 */ 1269 #define PCL__1 3 /* PC1 */ 1270 #define PCL__2 4 /* PC2 */ 1271 #define PCL__3 5 /* PC3 */ 1272 #define PCL__4 6 /* PC4 */ 1273 #define PCL__6 7 /* PC6 */ 1274 #define PCL_6N 8 /* PC6 No Retention */ 1275 #define PCL_6R 9 /* PC6 Retention */ 1276 #define PCL__7 10 /* PC7 */ 1277 #define PCL_7S 11 /* PC7 Shrink */ 1278 #define PCL__8 12 /* PC8 */ 1279 #define PCL__9 13 /* PC9 */ 1280 #define PCLUNL 14 /* Unlimited */ 1281 1282 int pkg_cstate_limit = PCLUKN; 1283 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", 1284 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"}; 1285 1286 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1287 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1288 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1289 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1290 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1291 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1292 int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1293 1294 1295 static void 1296 calculate_tsc_tweak() 1297 { 1298 tsc_tweak = base_hz / tsc_hz; 1299 } 1300 1301 static void 1302 dump_nhm_platform_info(void) 1303 { 1304 unsigned long long msr; 1305 unsigned int ratio; 1306 1307 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 1308 1309 fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); 1310 1311 ratio = (msr >> 40) & 0xFF; 1312 fprintf(outf, "%d * %.0f = %.0f MHz max efficiency frequency\n", 1313 ratio, bclk, ratio * bclk); 1314 1315 ratio = (msr >> 8) & 0xFF; 1316 fprintf(outf, "%d * %.0f = %.0f MHz base frequency\n", 1317 ratio, bclk, ratio * bclk); 1318 1319 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); 1320 fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", 1321 base_cpu, msr, msr & 0x2 ? "EN" : "DIS"); 1322 1323 return; 1324 } 1325 1326 static void 1327 dump_hsw_turbo_ratio_limits(void) 1328 { 1329 unsigned long long msr; 1330 unsigned int ratio; 1331 1332 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); 1333 1334 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr); 1335 1336 ratio = (msr >> 8) & 0xFF; 1337 if (ratio) 1338 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 18 active cores\n", 1339 ratio, bclk, ratio * bclk); 1340 1341 ratio = (msr >> 0) & 0xFF; 1342 if (ratio) 1343 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 17 active cores\n", 1344 ratio, bclk, ratio * bclk); 1345 return; 1346 } 1347 1348 static void 1349 dump_ivt_turbo_ratio_limits(void) 1350 { 1351 unsigned long long msr; 1352 unsigned int ratio; 1353 1354 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); 1355 1356 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr); 1357 1358 ratio = (msr >> 56) & 0xFF; 1359 if (ratio) 1360 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 16 active cores\n", 1361 ratio, bclk, ratio * bclk); 1362 1363 ratio = (msr >> 48) & 0xFF; 1364 if (ratio) 1365 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 15 active cores\n", 1366 ratio, bclk, ratio * bclk); 1367 1368 ratio = (msr >> 40) & 0xFF; 1369 if (ratio) 1370 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 14 active cores\n", 1371 ratio, bclk, ratio * bclk); 1372 1373 ratio = (msr >> 32) & 0xFF; 1374 if (ratio) 1375 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 13 active cores\n", 1376 ratio, bclk, ratio * bclk); 1377 1378 ratio = (msr >> 24) & 0xFF; 1379 if (ratio) 1380 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 12 active cores\n", 1381 ratio, bclk, ratio * bclk); 1382 1383 ratio = (msr >> 16) & 0xFF; 1384 if (ratio) 1385 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 11 active cores\n", 1386 ratio, bclk, ratio * bclk); 1387 1388 ratio = (msr >> 8) & 0xFF; 1389 if (ratio) 1390 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 10 active cores\n", 1391 ratio, bclk, ratio * bclk); 1392 1393 ratio = (msr >> 0) & 0xFF; 1394 if (ratio) 1395 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 9 active cores\n", 1396 ratio, bclk, ratio * bclk); 1397 return; 1398 } 1399 1400 static void 1401 dump_nhm_turbo_ratio_limits(void) 1402 { 1403 unsigned long long msr; 1404 unsigned int ratio; 1405 1406 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); 1407 1408 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); 1409 1410 ratio = (msr >> 56) & 0xFF; 1411 if (ratio) 1412 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 8 active cores\n", 1413 ratio, bclk, ratio * bclk); 1414 1415 ratio = (msr >> 48) & 0xFF; 1416 if (ratio) 1417 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 7 active cores\n", 1418 ratio, bclk, ratio * bclk); 1419 1420 ratio = (msr >> 40) & 0xFF; 1421 if (ratio) 1422 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 6 active cores\n", 1423 ratio, bclk, ratio * bclk); 1424 1425 ratio = (msr >> 32) & 0xFF; 1426 if (ratio) 1427 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 5 active cores\n", 1428 ratio, bclk, ratio * bclk); 1429 1430 ratio = (msr >> 24) & 0xFF; 1431 if (ratio) 1432 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 4 active cores\n", 1433 ratio, bclk, ratio * bclk); 1434 1435 ratio = (msr >> 16) & 0xFF; 1436 if (ratio) 1437 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 3 active cores\n", 1438 ratio, bclk, ratio * bclk); 1439 1440 ratio = (msr >> 8) & 0xFF; 1441 if (ratio) 1442 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 2 active cores\n", 1443 ratio, bclk, ratio * bclk); 1444 1445 ratio = (msr >> 0) & 0xFF; 1446 if (ratio) 1447 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", 1448 ratio, bclk, ratio * bclk); 1449 return; 1450 } 1451 1452 static void 1453 dump_knl_turbo_ratio_limits(void) 1454 { 1455 const unsigned int buckets_no = 7; 1456 1457 unsigned long long msr; 1458 int delta_cores, delta_ratio; 1459 int i, b_nr; 1460 unsigned int cores[buckets_no]; 1461 unsigned int ratio[buckets_no]; 1462 1463 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); 1464 1465 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", 1466 base_cpu, msr); 1467 1468 /** 1469 * Turbo encoding in KNL is as follows: 1470 * [0] -- Reserved 1471 * [7:1] -- Base value of number of active cores of bucket 1. 1472 * [15:8] -- Base value of freq ratio of bucket 1. 1473 * [20:16] -- +ve delta of number of active cores of bucket 2. 1474 * i.e. active cores of bucket 2 = 1475 * active cores of bucket 1 + delta 1476 * [23:21] -- Negative delta of freq ratio of bucket 2. 1477 * i.e. freq ratio of bucket 2 = 1478 * freq ratio of bucket 1 - delta 1479 * [28:24]-- +ve delta of number of active cores of bucket 3. 1480 * [31:29]-- -ve delta of freq ratio of bucket 3. 1481 * [36:32]-- +ve delta of number of active cores of bucket 4. 1482 * [39:37]-- -ve delta of freq ratio of bucket 4. 1483 * [44:40]-- +ve delta of number of active cores of bucket 5. 1484 * [47:45]-- -ve delta of freq ratio of bucket 5. 1485 * [52:48]-- +ve delta of number of active cores of bucket 6. 1486 * [55:53]-- -ve delta of freq ratio of bucket 6. 1487 * [60:56]-- +ve delta of number of active cores of bucket 7. 1488 * [63:61]-- -ve delta of freq ratio of bucket 7. 1489 */ 1490 1491 b_nr = 0; 1492 cores[b_nr] = (msr & 0xFF) >> 1; 1493 ratio[b_nr] = (msr >> 8) & 0xFF; 1494 1495 for (i = 16; i < 64; i += 8) { 1496 delta_cores = (msr >> i) & 0x1F; 1497 delta_ratio = (msr >> (i + 5)) & 0x7; 1498 1499 cores[b_nr + 1] = cores[b_nr] + delta_cores; 1500 ratio[b_nr + 1] = ratio[b_nr] - delta_ratio; 1501 b_nr++; 1502 } 1503 1504 for (i = buckets_no - 1; i >= 0; i--) 1505 if (i > 0 ? ratio[i] != ratio[i - 1] : 1) 1506 fprintf(outf, 1507 "%d * %.0f = %.0f MHz max turbo %d active cores\n", 1508 ratio[i], bclk, ratio[i] * bclk, cores[i]); 1509 } 1510 1511 static void 1512 dump_nhm_cst_cfg(void) 1513 { 1514 unsigned long long msr; 1515 1516 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1517 1518 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 1519 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 1520 1521 fprintf(outf, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr); 1522 1523 fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n", 1524 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", 1525 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", 1526 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", 1527 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "", 1528 (msr & (1 << 15)) ? "" : "UN", 1529 (unsigned int)msr & 0xF, 1530 pkg_cstate_limit_strings[pkg_cstate_limit]); 1531 return; 1532 } 1533 1534 static void 1535 dump_config_tdp(void) 1536 { 1537 unsigned long long msr; 1538 1539 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr); 1540 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr); 1541 fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF); 1542 1543 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr); 1544 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr); 1545 if (msr) { 1546 fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF); 1547 fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF); 1548 fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF); 1549 fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF); 1550 } 1551 fprintf(outf, ")\n"); 1552 1553 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr); 1554 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr); 1555 if (msr) { 1556 fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF); 1557 fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF); 1558 fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF); 1559 fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF); 1560 } 1561 fprintf(outf, ")\n"); 1562 1563 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr); 1564 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr); 1565 if ((msr) & 0x3) 1566 fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3); 1567 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 1568 fprintf(outf, ")\n"); 1569 1570 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr); 1571 fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr); 1572 fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF); 1573 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 1574 fprintf(outf, ")\n"); 1575 } 1576 1577 unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; 1578 1579 void print_irtl(void) 1580 { 1581 unsigned long long msr; 1582 1583 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); 1584 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); 1585 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1586 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1587 1588 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr); 1589 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr); 1590 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1591 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1592 1593 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr); 1594 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr); 1595 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1596 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1597 1598 if (!do_irtl_hsw) 1599 return; 1600 1601 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr); 1602 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr); 1603 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1604 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1605 1606 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr); 1607 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr); 1608 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1609 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1610 1611 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr); 1612 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr); 1613 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1614 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1615 1616 } 1617 void free_fd_percpu(void) 1618 { 1619 int i; 1620 1621 for (i = 0; i < topo.max_cpu_num + 1; ++i) { 1622 if (fd_percpu[i] != 0) 1623 close(fd_percpu[i]); 1624 } 1625 1626 free(fd_percpu); 1627 } 1628 1629 void free_all_buffers(void) 1630 { 1631 CPU_FREE(cpu_present_set); 1632 cpu_present_set = NULL; 1633 cpu_present_setsize = 0; 1634 1635 CPU_FREE(cpu_affinity_set); 1636 cpu_affinity_set = NULL; 1637 cpu_affinity_setsize = 0; 1638 1639 free(thread_even); 1640 free(core_even); 1641 free(package_even); 1642 1643 thread_even = NULL; 1644 core_even = NULL; 1645 package_even = NULL; 1646 1647 free(thread_odd); 1648 free(core_odd); 1649 free(package_odd); 1650 1651 thread_odd = NULL; 1652 core_odd = NULL; 1653 package_odd = NULL; 1654 1655 free(output_buffer); 1656 output_buffer = NULL; 1657 outp = NULL; 1658 1659 free_fd_percpu(); 1660 1661 free(irq_column_2_cpu); 1662 free(irqs_per_cpu); 1663 } 1664 1665 /* 1666 * Open a file, and exit on failure 1667 */ 1668 FILE *fopen_or_die(const char *path, const char *mode) 1669 { 1670 FILE *filep = fopen(path, mode); 1671 if (!filep) 1672 err(1, "%s: open failed", path); 1673 return filep; 1674 } 1675 1676 /* 1677 * Parse a file containing a single int. 1678 */ 1679 int parse_int_file(const char *fmt, ...) 1680 { 1681 va_list args; 1682 char path[PATH_MAX]; 1683 FILE *filep; 1684 int value; 1685 1686 va_start(args, fmt); 1687 vsnprintf(path, sizeof(path), fmt, args); 1688 va_end(args); 1689 filep = fopen_or_die(path, "r"); 1690 if (fscanf(filep, "%d", &value) != 1) 1691 err(1, "%s: failed to parse number from file", path); 1692 fclose(filep); 1693 return value; 1694 } 1695 1696 /* 1697 * get_cpu_position_in_core(cpu) 1698 * return the position of the CPU among its HT siblings in the core 1699 * return -1 if the sibling is not in list 1700 */ 1701 int get_cpu_position_in_core(int cpu) 1702 { 1703 char path[64]; 1704 FILE *filep; 1705 int this_cpu; 1706 char character; 1707 int i; 1708 1709 sprintf(path, 1710 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", 1711 cpu); 1712 filep = fopen(path, "r"); 1713 if (filep == NULL) { 1714 perror(path); 1715 exit(1); 1716 } 1717 1718 for (i = 0; i < topo.num_threads_per_core; i++) { 1719 fscanf(filep, "%d", &this_cpu); 1720 if (this_cpu == cpu) { 1721 fclose(filep); 1722 return i; 1723 } 1724 1725 /* Account for no separator after last thread*/ 1726 if (i != (topo.num_threads_per_core - 1)) 1727 fscanf(filep, "%c", &character); 1728 } 1729 1730 fclose(filep); 1731 return -1; 1732 } 1733 1734 /* 1735 * cpu_is_first_core_in_package(cpu) 1736 * return 1 if given CPU is 1st core in package 1737 */ 1738 int cpu_is_first_core_in_package(int cpu) 1739 { 1740 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); 1741 } 1742 1743 int get_physical_package_id(int cpu) 1744 { 1745 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); 1746 } 1747 1748 int get_core_id(int cpu) 1749 { 1750 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); 1751 } 1752 1753 int get_num_ht_siblings(int cpu) 1754 { 1755 char path[80]; 1756 FILE *filep; 1757 int sib1; 1758 int matches = 0; 1759 char character; 1760 char str[100]; 1761 char *ch; 1762 1763 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); 1764 filep = fopen_or_die(path, "r"); 1765 1766 /* 1767 * file format: 1768 * A ',' separated or '-' separated set of numbers 1769 * (eg 1-2 or 1,3,4,5) 1770 */ 1771 fscanf(filep, "%d%c\n", &sib1, &character); 1772 fseek(filep, 0, SEEK_SET); 1773 fgets(str, 100, filep); 1774 ch = strchr(str, character); 1775 while (ch != NULL) { 1776 matches++; 1777 ch = strchr(ch+1, character); 1778 } 1779 1780 fclose(filep); 1781 return matches+1; 1782 } 1783 1784 /* 1785 * run func(thread, core, package) in topology order 1786 * skip non-present cpus 1787 */ 1788 1789 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, 1790 struct pkg_data *, struct thread_data *, struct core_data *, 1791 struct pkg_data *), struct thread_data *thread_base, 1792 struct core_data *core_base, struct pkg_data *pkg_base, 1793 struct thread_data *thread_base2, struct core_data *core_base2, 1794 struct pkg_data *pkg_base2) 1795 { 1796 int retval, pkg_no, core_no, thread_no; 1797 1798 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 1799 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { 1800 for (thread_no = 0; thread_no < 1801 topo.num_threads_per_core; ++thread_no) { 1802 struct thread_data *t, *t2; 1803 struct core_data *c, *c2; 1804 struct pkg_data *p, *p2; 1805 1806 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); 1807 1808 if (cpu_is_not_present(t->cpu_id)) 1809 continue; 1810 1811 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no); 1812 1813 c = GET_CORE(core_base, core_no, pkg_no); 1814 c2 = GET_CORE(core_base2, core_no, pkg_no); 1815 1816 p = GET_PKG(pkg_base, pkg_no); 1817 p2 = GET_PKG(pkg_base2, pkg_no); 1818 1819 retval = func(t, c, p, t2, c2, p2); 1820 if (retval) 1821 return retval; 1822 } 1823 } 1824 } 1825 return 0; 1826 } 1827 1828 /* 1829 * run func(cpu) on every cpu in /proc/stat 1830 * return max_cpu number 1831 */ 1832 int for_all_proc_cpus(int (func)(int)) 1833 { 1834 FILE *fp; 1835 int cpu_num; 1836 int retval; 1837 1838 fp = fopen_or_die(proc_stat, "r"); 1839 1840 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); 1841 if (retval != 0) 1842 err(1, "%s: failed to parse format", proc_stat); 1843 1844 while (1) { 1845 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); 1846 if (retval != 1) 1847 break; 1848 1849 retval = func(cpu_num); 1850 if (retval) { 1851 fclose(fp); 1852 return(retval); 1853 } 1854 } 1855 fclose(fp); 1856 return 0; 1857 } 1858 1859 void re_initialize(void) 1860 { 1861 free_all_buffers(); 1862 setup_all_buffers(); 1863 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus); 1864 } 1865 1866 1867 /* 1868 * count_cpus() 1869 * remember the last one seen, it will be the max 1870 */ 1871 int count_cpus(int cpu) 1872 { 1873 if (topo.max_cpu_num < cpu) 1874 topo.max_cpu_num = cpu; 1875 1876 topo.num_cpus += 1; 1877 return 0; 1878 } 1879 int mark_cpu_present(int cpu) 1880 { 1881 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); 1882 return 0; 1883 } 1884 1885 /* 1886 * snapshot_proc_interrupts() 1887 * 1888 * read and record summary of /proc/interrupts 1889 * 1890 * return 1 if config change requires a restart, else return 0 1891 */ 1892 int snapshot_proc_interrupts(void) 1893 { 1894 static FILE *fp; 1895 int column, retval; 1896 1897 if (fp == NULL) 1898 fp = fopen_or_die("/proc/interrupts", "r"); 1899 else 1900 rewind(fp); 1901 1902 /* read 1st line of /proc/interrupts to get cpu* name for each column */ 1903 for (column = 0; column < topo.num_cpus; ++column) { 1904 int cpu_number; 1905 1906 retval = fscanf(fp, " CPU%d", &cpu_number); 1907 if (retval != 1) 1908 break; 1909 1910 if (cpu_number > topo.max_cpu_num) { 1911 warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num); 1912 return 1; 1913 } 1914 1915 irq_column_2_cpu[column] = cpu_number; 1916 irqs_per_cpu[cpu_number] = 0; 1917 } 1918 1919 /* read /proc/interrupt count lines and sum up irqs per cpu */ 1920 while (1) { 1921 int column; 1922 char buf[64]; 1923 1924 retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */ 1925 if (retval != 1) 1926 break; 1927 1928 /* read the count per cpu */ 1929 for (column = 0; column < topo.num_cpus; ++column) { 1930 1931 int cpu_number, irq_count; 1932 1933 retval = fscanf(fp, " %d", &irq_count); 1934 if (retval != 1) 1935 break; 1936 1937 cpu_number = irq_column_2_cpu[column]; 1938 irqs_per_cpu[cpu_number] += irq_count; 1939 1940 } 1941 1942 while (getc(fp) != '\n') 1943 ; /* flush interrupt description */ 1944 1945 } 1946 return 0; 1947 } 1948 /* 1949 * snapshot_gfx_rc6_ms() 1950 * 1951 * record snapshot of 1952 * /sys/class/drm/card0/power/rc6_residency_ms 1953 * 1954 * return 1 if config change requires a restart, else return 0 1955 */ 1956 int snapshot_gfx_rc6_ms(void) 1957 { 1958 FILE *fp; 1959 int retval; 1960 1961 fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r"); 1962 1963 retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms); 1964 if (retval != 1) 1965 err(1, "GFX rc6"); 1966 1967 fclose(fp); 1968 1969 return 0; 1970 } 1971 /* 1972 * snapshot_gfx_mhz() 1973 * 1974 * record snapshot of 1975 * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz 1976 * 1977 * return 1 if config change requires a restart, else return 0 1978 */ 1979 int snapshot_gfx_mhz(void) 1980 { 1981 static FILE *fp; 1982 int retval; 1983 1984 if (fp == NULL) 1985 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); 1986 else 1987 rewind(fp); 1988 1989 retval = fscanf(fp, "%d", &gfx_cur_mhz); 1990 if (retval != 1) 1991 err(1, "GFX MHz"); 1992 1993 return 0; 1994 } 1995 1996 /* 1997 * snapshot /proc and /sys files 1998 * 1999 * return 1 if configuration restart needed, else return 0 2000 */ 2001 int snapshot_proc_sysfs_files(void) 2002 { 2003 if (snapshot_proc_interrupts()) 2004 return 1; 2005 2006 if (do_gfx_rc6_ms) 2007 snapshot_gfx_rc6_ms(); 2008 2009 if (do_gfx_mhz) 2010 snapshot_gfx_mhz(); 2011 2012 return 0; 2013 } 2014 2015 void turbostat_loop() 2016 { 2017 int retval; 2018 int restarted = 0; 2019 2020 restart: 2021 restarted++; 2022 2023 snapshot_proc_sysfs_files(); 2024 retval = for_all_cpus(get_counters, EVEN_COUNTERS); 2025 if (retval < -1) { 2026 exit(retval); 2027 } else if (retval == -1) { 2028 if (restarted > 1) { 2029 exit(retval); 2030 } 2031 re_initialize(); 2032 goto restart; 2033 } 2034 restarted = 0; 2035 gettimeofday(&tv_even, (struct timezone *)NULL); 2036 2037 while (1) { 2038 if (for_all_proc_cpus(cpu_is_not_present)) { 2039 re_initialize(); 2040 goto restart; 2041 } 2042 nanosleep(&interval_ts, NULL); 2043 if (snapshot_proc_sysfs_files()) 2044 goto restart; 2045 retval = for_all_cpus(get_counters, ODD_COUNTERS); 2046 if (retval < -1) { 2047 exit(retval); 2048 } else if (retval == -1) { 2049 re_initialize(); 2050 goto restart; 2051 } 2052 gettimeofday(&tv_odd, (struct timezone *)NULL); 2053 timersub(&tv_odd, &tv_even, &tv_delta); 2054 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) { 2055 re_initialize(); 2056 goto restart; 2057 } 2058 compute_average(EVEN_COUNTERS); 2059 format_all_counters(EVEN_COUNTERS); 2060 flush_output_stdout(); 2061 nanosleep(&interval_ts, NULL); 2062 if (snapshot_proc_sysfs_files()) 2063 goto restart; 2064 retval = for_all_cpus(get_counters, EVEN_COUNTERS); 2065 if (retval < -1) { 2066 exit(retval); 2067 } else if (retval == -1) { 2068 re_initialize(); 2069 goto restart; 2070 } 2071 gettimeofday(&tv_even, (struct timezone *)NULL); 2072 timersub(&tv_even, &tv_odd, &tv_delta); 2073 if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) { 2074 re_initialize(); 2075 goto restart; 2076 } 2077 compute_average(ODD_COUNTERS); 2078 format_all_counters(ODD_COUNTERS); 2079 flush_output_stdout(); 2080 } 2081 } 2082 2083 void check_dev_msr() 2084 { 2085 struct stat sb; 2086 char pathname[32]; 2087 2088 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 2089 if (stat(pathname, &sb)) 2090 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 2091 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 2092 } 2093 2094 void check_permissions() 2095 { 2096 struct __user_cap_header_struct cap_header_data; 2097 cap_user_header_t cap_header = &cap_header_data; 2098 struct __user_cap_data_struct cap_data_data; 2099 cap_user_data_t cap_data = &cap_data_data; 2100 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); 2101 int do_exit = 0; 2102 char pathname[32]; 2103 2104 /* check for CAP_SYS_RAWIO */ 2105 cap_header->pid = getpid(); 2106 cap_header->version = _LINUX_CAPABILITY_VERSION; 2107 if (capget(cap_header, cap_data) < 0) 2108 err(-6, "capget(2) failed"); 2109 2110 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) { 2111 do_exit++; 2112 warnx("capget(CAP_SYS_RAWIO) failed," 2113 " try \"# setcap cap_sys_rawio=ep %s\"", progname); 2114 } 2115 2116 /* test file permissions */ 2117 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 2118 if (euidaccess(pathname, R_OK)) { 2119 do_exit++; 2120 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); 2121 } 2122 2123 /* if all else fails, thell them to be root */ 2124 if (do_exit) 2125 if (getuid() != 0) 2126 warnx("... or simply run as root"); 2127 2128 if (do_exit) 2129 exit(-6); 2130 } 2131 2132 /* 2133 * NHM adds support for additional MSRs: 2134 * 2135 * MSR_SMI_COUNT 0x00000034 2136 * 2137 * MSR_PLATFORM_INFO 0x000000ce 2138 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 2139 * 2140 * MSR_PKG_C3_RESIDENCY 0x000003f8 2141 * MSR_PKG_C6_RESIDENCY 0x000003f9 2142 * MSR_CORE_C3_RESIDENCY 0x000003fc 2143 * MSR_CORE_C6_RESIDENCY 0x000003fd 2144 * 2145 * Side effect: 2146 * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL 2147 */ 2148 int probe_nhm_msrs(unsigned int family, unsigned int model) 2149 { 2150 unsigned long long msr; 2151 unsigned int base_ratio; 2152 int *pkg_cstate_limits; 2153 2154 if (!genuine_intel) 2155 return 0; 2156 2157 if (family != 6) 2158 return 0; 2159 2160 bclk = discover_bclk(family, model); 2161 2162 switch (model) { 2163 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ 2164 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ 2165 case 0x1F: /* Core i7 and i5 Processor - Nehalem */ 2166 case 0x25: /* Westmere Client - Clarkdale, Arrandale */ 2167 case 0x2C: /* Westmere EP - Gulftown */ 2168 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 2169 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 2170 pkg_cstate_limits = nhm_pkg_cstate_limits; 2171 break; 2172 case 0x2A: /* SNB */ 2173 case 0x2D: /* SNB Xeon */ 2174 case 0x3A: /* IVB */ 2175 case 0x3E: /* IVB Xeon */ 2176 pkg_cstate_limits = snb_pkg_cstate_limits; 2177 break; 2178 case 0x3C: /* HSW */ 2179 case 0x3F: /* HSX */ 2180 case 0x45: /* HSW */ 2181 case 0x46: /* HSW */ 2182 case 0x3D: /* BDW */ 2183 case 0x47: /* BDW */ 2184 case 0x4F: /* BDX */ 2185 case 0x56: /* BDX-DE */ 2186 case 0x4E: /* SKL */ 2187 case 0x5E: /* SKL */ 2188 case 0x8E: /* KBL */ 2189 case 0x9E: /* KBL */ 2190 case 0x55: /* SKX */ 2191 pkg_cstate_limits = hsw_pkg_cstate_limits; 2192 break; 2193 case 0x37: /* BYT */ 2194 case 0x4D: /* AVN */ 2195 pkg_cstate_limits = slv_pkg_cstate_limits; 2196 break; 2197 case 0x4C: /* AMT */ 2198 pkg_cstate_limits = amt_pkg_cstate_limits; 2199 break; 2200 case 0x57: /* PHI */ 2201 pkg_cstate_limits = phi_pkg_cstate_limits; 2202 break; 2203 case 0x5C: /* BXT */ 2204 pkg_cstate_limits = bxt_pkg_cstate_limits; 2205 break; 2206 default: 2207 return 0; 2208 } 2209 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 2210 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 2211 2212 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 2213 base_ratio = (msr >> 8) & 0xFF; 2214 2215 base_hz = base_ratio * bclk * 1000000; 2216 has_base_hz = 1; 2217 return 1; 2218 } 2219 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) 2220 { 2221 switch (model) { 2222 /* Nehalem compatible, but do not include turbo-ratio limit support */ 2223 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 2224 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 2225 case 0x57: /* PHI - Knights Landing (different MSR definition) */ 2226 return 0; 2227 default: 2228 return 1; 2229 } 2230 } 2231 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) 2232 { 2233 if (!genuine_intel) 2234 return 0; 2235 2236 if (family != 6) 2237 return 0; 2238 2239 switch (model) { 2240 case 0x3E: /* IVB Xeon */ 2241 case 0x3F: /* HSW Xeon */ 2242 return 1; 2243 default: 2244 return 0; 2245 } 2246 } 2247 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) 2248 { 2249 if (!genuine_intel) 2250 return 0; 2251 2252 if (family != 6) 2253 return 0; 2254 2255 switch (model) { 2256 case 0x3F: /* HSW Xeon */ 2257 return 1; 2258 default: 2259 return 0; 2260 } 2261 } 2262 2263 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) 2264 { 2265 if (!genuine_intel) 2266 return 0; 2267 2268 if (family != 6) 2269 return 0; 2270 2271 switch (model) { 2272 case 0x57: /* Knights Landing */ 2273 return 1; 2274 default: 2275 return 0; 2276 } 2277 } 2278 int has_config_tdp(unsigned int family, unsigned int model) 2279 { 2280 if (!genuine_intel) 2281 return 0; 2282 2283 if (family != 6) 2284 return 0; 2285 2286 switch (model) { 2287 case 0x3A: /* IVB */ 2288 case 0x3C: /* HSW */ 2289 case 0x3F: /* HSX */ 2290 case 0x45: /* HSW */ 2291 case 0x46: /* HSW */ 2292 case 0x3D: /* BDW */ 2293 case 0x47: /* BDW */ 2294 case 0x4F: /* BDX */ 2295 case 0x56: /* BDX-DE */ 2296 case 0x4E: /* SKL */ 2297 case 0x5E: /* SKL */ 2298 case 0x8E: /* KBL */ 2299 case 0x9E: /* KBL */ 2300 case 0x55: /* SKX */ 2301 2302 case 0x57: /* Knights Landing */ 2303 return 1; 2304 default: 2305 return 0; 2306 } 2307 } 2308 2309 static void 2310 dump_cstate_pstate_config_info(unsigned int family, unsigned int model) 2311 { 2312 if (!do_nhm_platform_info) 2313 return; 2314 2315 dump_nhm_platform_info(); 2316 2317 if (has_hsw_turbo_ratio_limit(family, model)) 2318 dump_hsw_turbo_ratio_limits(); 2319 2320 if (has_ivt_turbo_ratio_limit(family, model)) 2321 dump_ivt_turbo_ratio_limits(); 2322 2323 if (has_nhm_turbo_ratio_limit(family, model)) 2324 dump_nhm_turbo_ratio_limits(); 2325 2326 if (has_knl_turbo_ratio_limit(family, model)) 2327 dump_knl_turbo_ratio_limits(); 2328 2329 if (has_config_tdp(family, model)) 2330 dump_config_tdp(); 2331 2332 dump_nhm_cst_cfg(); 2333 } 2334 2335 2336 /* 2337 * print_epb() 2338 * Decode the ENERGY_PERF_BIAS MSR 2339 */ 2340 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2341 { 2342 unsigned long long msr; 2343 char *epb_string; 2344 int cpu; 2345 2346 if (!has_epb) 2347 return 0; 2348 2349 cpu = t->cpu_id; 2350 2351 /* EPB is per-package */ 2352 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2353 return 0; 2354 2355 if (cpu_migrate(cpu)) { 2356 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2357 return -1; 2358 } 2359 2360 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) 2361 return 0; 2362 2363 switch (msr & 0xF) { 2364 case ENERGY_PERF_BIAS_PERFORMANCE: 2365 epb_string = "performance"; 2366 break; 2367 case ENERGY_PERF_BIAS_NORMAL: 2368 epb_string = "balanced"; 2369 break; 2370 case ENERGY_PERF_BIAS_POWERSAVE: 2371 epb_string = "powersave"; 2372 break; 2373 default: 2374 epb_string = "custom"; 2375 break; 2376 } 2377 fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string); 2378 2379 return 0; 2380 } 2381 /* 2382 * print_hwp() 2383 * Decode the MSR_HWP_CAPABILITIES 2384 */ 2385 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2386 { 2387 unsigned long long msr; 2388 int cpu; 2389 2390 if (!has_hwp) 2391 return 0; 2392 2393 cpu = t->cpu_id; 2394 2395 /* MSR_HWP_CAPABILITIES is per-package */ 2396 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2397 return 0; 2398 2399 if (cpu_migrate(cpu)) { 2400 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2401 return -1; 2402 } 2403 2404 if (get_msr(cpu, MSR_PM_ENABLE, &msr)) 2405 return 0; 2406 2407 fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", 2408 cpu, msr, (msr & (1 << 0)) ? "" : "No-"); 2409 2410 /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */ 2411 if ((msr & (1 << 0)) == 0) 2412 return 0; 2413 2414 if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr)) 2415 return 0; 2416 2417 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " 2418 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", 2419 cpu, msr, 2420 (unsigned int)HWP_HIGHEST_PERF(msr), 2421 (unsigned int)HWP_GUARANTEED_PERF(msr), 2422 (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), 2423 (unsigned int)HWP_LOWEST_PERF(msr)); 2424 2425 if (get_msr(cpu, MSR_HWP_REQUEST, &msr)) 2426 return 0; 2427 2428 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " 2429 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", 2430 cpu, msr, 2431 (unsigned int)(((msr) >> 0) & 0xff), 2432 (unsigned int)(((msr) >> 8) & 0xff), 2433 (unsigned int)(((msr) >> 16) & 0xff), 2434 (unsigned int)(((msr) >> 24) & 0xff), 2435 (unsigned int)(((msr) >> 32) & 0xff3), 2436 (unsigned int)(((msr) >> 42) & 0x1)); 2437 2438 if (has_hwp_pkg) { 2439 if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr)) 2440 return 0; 2441 2442 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " 2443 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", 2444 cpu, msr, 2445 (unsigned int)(((msr) >> 0) & 0xff), 2446 (unsigned int)(((msr) >> 8) & 0xff), 2447 (unsigned int)(((msr) >> 16) & 0xff), 2448 (unsigned int)(((msr) >> 24) & 0xff), 2449 (unsigned int)(((msr) >> 32) & 0xff3)); 2450 } 2451 if (has_hwp_notify) { 2452 if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr)) 2453 return 0; 2454 2455 fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx " 2456 "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", 2457 cpu, msr, 2458 ((msr) & 0x1) ? "EN" : "Dis", 2459 ((msr) & 0x2) ? "EN" : "Dis"); 2460 } 2461 if (get_msr(cpu, MSR_HWP_STATUS, &msr)) 2462 return 0; 2463 2464 fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx " 2465 "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", 2466 cpu, msr, 2467 ((msr) & 0x1) ? "" : "No-", 2468 ((msr) & 0x2) ? "" : "No-"); 2469 2470 return 0; 2471 } 2472 2473 /* 2474 * print_perf_limit() 2475 */ 2476 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2477 { 2478 unsigned long long msr; 2479 int cpu; 2480 2481 cpu = t->cpu_id; 2482 2483 /* per-package */ 2484 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2485 return 0; 2486 2487 if (cpu_migrate(cpu)) { 2488 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2489 return -1; 2490 } 2491 2492 if (do_core_perf_limit_reasons) { 2493 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr); 2494 fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 2495 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)", 2496 (msr & 1 << 15) ? "bit15, " : "", 2497 (msr & 1 << 14) ? "bit14, " : "", 2498 (msr & 1 << 13) ? "Transitions, " : "", 2499 (msr & 1 << 12) ? "MultiCoreTurbo, " : "", 2500 (msr & 1 << 11) ? "PkgPwrL2, " : "", 2501 (msr & 1 << 10) ? "PkgPwrL1, " : "", 2502 (msr & 1 << 9) ? "CorePwr, " : "", 2503 (msr & 1 << 8) ? "Amps, " : "", 2504 (msr & 1 << 6) ? "VR-Therm, " : "", 2505 (msr & 1 << 5) ? "Auto-HWP, " : "", 2506 (msr & 1 << 4) ? "Graphics, " : "", 2507 (msr & 1 << 2) ? "bit2, " : "", 2508 (msr & 1 << 1) ? "ThermStatus, " : "", 2509 (msr & 1 << 0) ? "PROCHOT, " : ""); 2510 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", 2511 (msr & 1 << 31) ? "bit31, " : "", 2512 (msr & 1 << 30) ? "bit30, " : "", 2513 (msr & 1 << 29) ? "Transitions, " : "", 2514 (msr & 1 << 28) ? "MultiCoreTurbo, " : "", 2515 (msr & 1 << 27) ? "PkgPwrL2, " : "", 2516 (msr & 1 << 26) ? "PkgPwrL1, " : "", 2517 (msr & 1 << 25) ? "CorePwr, " : "", 2518 (msr & 1 << 24) ? "Amps, " : "", 2519 (msr & 1 << 22) ? "VR-Therm, " : "", 2520 (msr & 1 << 21) ? "Auto-HWP, " : "", 2521 (msr & 1 << 20) ? "Graphics, " : "", 2522 (msr & 1 << 18) ? "bit18, " : "", 2523 (msr & 1 << 17) ? "ThermStatus, " : "", 2524 (msr & 1 << 16) ? "PROCHOT, " : ""); 2525 2526 } 2527 if (do_gfx_perf_limit_reasons) { 2528 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr); 2529 fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 2530 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)", 2531 (msr & 1 << 0) ? "PROCHOT, " : "", 2532 (msr & 1 << 1) ? "ThermStatus, " : "", 2533 (msr & 1 << 4) ? "Graphics, " : "", 2534 (msr & 1 << 6) ? "VR-Therm, " : "", 2535 (msr & 1 << 8) ? "Amps, " : "", 2536 (msr & 1 << 9) ? "GFXPwr, " : "", 2537 (msr & 1 << 10) ? "PkgPwrL1, " : "", 2538 (msr & 1 << 11) ? "PkgPwrL2, " : ""); 2539 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n", 2540 (msr & 1 << 16) ? "PROCHOT, " : "", 2541 (msr & 1 << 17) ? "ThermStatus, " : "", 2542 (msr & 1 << 20) ? "Graphics, " : "", 2543 (msr & 1 << 22) ? "VR-Therm, " : "", 2544 (msr & 1 << 24) ? "Amps, " : "", 2545 (msr & 1 << 25) ? "GFXPwr, " : "", 2546 (msr & 1 << 26) ? "PkgPwrL1, " : "", 2547 (msr & 1 << 27) ? "PkgPwrL2, " : ""); 2548 } 2549 if (do_ring_perf_limit_reasons) { 2550 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); 2551 fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 2552 fprintf(outf, " (Active: %s%s%s%s%s%s)", 2553 (msr & 1 << 0) ? "PROCHOT, " : "", 2554 (msr & 1 << 1) ? "ThermStatus, " : "", 2555 (msr & 1 << 6) ? "VR-Therm, " : "", 2556 (msr & 1 << 8) ? "Amps, " : "", 2557 (msr & 1 << 10) ? "PkgPwrL1, " : "", 2558 (msr & 1 << 11) ? "PkgPwrL2, " : ""); 2559 fprintf(outf, " (Logged: %s%s%s%s%s%s)\n", 2560 (msr & 1 << 16) ? "PROCHOT, " : "", 2561 (msr & 1 << 17) ? "ThermStatus, " : "", 2562 (msr & 1 << 22) ? "VR-Therm, " : "", 2563 (msr & 1 << 24) ? "Amps, " : "", 2564 (msr & 1 << 26) ? "PkgPwrL1, " : "", 2565 (msr & 1 << 27) ? "PkgPwrL2, " : ""); 2566 } 2567 return 0; 2568 } 2569 2570 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 2571 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 2572 2573 double get_tdp(unsigned int model) 2574 { 2575 unsigned long long msr; 2576 2577 if (do_rapl & RAPL_PKG_POWER_INFO) 2578 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) 2579 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; 2580 2581 switch (model) { 2582 case 0x37: 2583 case 0x4D: 2584 return 30.0; 2585 default: 2586 return 135.0; 2587 } 2588 } 2589 2590 /* 2591 * rapl_dram_energy_units_probe() 2592 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 2593 */ 2594 static double 2595 rapl_dram_energy_units_probe(int model, double rapl_energy_units) 2596 { 2597 /* only called for genuine_intel, family 6 */ 2598 2599 switch (model) { 2600 case 0x3F: /* HSX */ 2601 case 0x4F: /* BDX */ 2602 case 0x56: /* BDX-DE */ 2603 case 0x57: /* KNL */ 2604 return (rapl_dram_energy_units = 15.3 / 1000000); 2605 default: 2606 return (rapl_energy_units); 2607 } 2608 } 2609 2610 2611 /* 2612 * rapl_probe() 2613 * 2614 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units 2615 */ 2616 void rapl_probe(unsigned int family, unsigned int model) 2617 { 2618 unsigned long long msr; 2619 unsigned int time_unit; 2620 double tdp; 2621 2622 if (!genuine_intel) 2623 return; 2624 2625 if (family != 6) 2626 return; 2627 2628 switch (model) { 2629 case 0x2A: 2630 case 0x3A: 2631 case 0x3C: /* HSW */ 2632 case 0x45: /* HSW */ 2633 case 0x46: /* HSW */ 2634 case 0x3D: /* BDW */ 2635 case 0x47: /* BDW */ 2636 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; 2637 break; 2638 case 0x5C: /* BXT */ 2639 do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; 2640 break; 2641 case 0x4E: /* SKL */ 2642 case 0x5E: /* SKL */ 2643 case 0x8E: /* KBL */ 2644 case 0x9E: /* KBL */ 2645 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2646 break; 2647 case 0x3F: /* HSX */ 2648 case 0x4F: /* BDX */ 2649 case 0x56: /* BDX-DE */ 2650 case 0x55: /* SKX */ 2651 case 0x57: /* KNL */ 2652 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2653 break; 2654 case 0x2D: 2655 case 0x3E: 2656 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; 2657 break; 2658 case 0x37: /* BYT */ 2659 case 0x4D: /* AVN */ 2660 do_rapl = RAPL_PKG | RAPL_CORES ; 2661 break; 2662 default: 2663 return; 2664 } 2665 2666 /* units on package 0, verify later other packages match */ 2667 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) 2668 return; 2669 2670 rapl_power_units = 1.0 / (1 << (msr & 0xF)); 2671 if (model == 0x37) 2672 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; 2673 else 2674 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); 2675 2676 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units); 2677 2678 time_unit = msr >> 16 & 0xF; 2679 if (time_unit == 0) 2680 time_unit = 0xA; 2681 2682 rapl_time_units = 1.0 / (1 << (time_unit)); 2683 2684 tdp = get_tdp(model); 2685 2686 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 2687 if (debug) 2688 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); 2689 2690 return; 2691 } 2692 2693 void perf_limit_reasons_probe(unsigned int family, unsigned int model) 2694 { 2695 if (!genuine_intel) 2696 return; 2697 2698 if (family != 6) 2699 return; 2700 2701 switch (model) { 2702 case 0x3C: /* HSW */ 2703 case 0x45: /* HSW */ 2704 case 0x46: /* HSW */ 2705 do_gfx_perf_limit_reasons = 1; 2706 case 0x3F: /* HSX */ 2707 do_core_perf_limit_reasons = 1; 2708 do_ring_perf_limit_reasons = 1; 2709 default: 2710 return; 2711 } 2712 } 2713 2714 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2715 { 2716 unsigned long long msr; 2717 unsigned int dts; 2718 int cpu; 2719 2720 if (!(do_dts || do_ptm)) 2721 return 0; 2722 2723 cpu = t->cpu_id; 2724 2725 /* DTS is per-core, no need to print for each thread */ 2726 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 2727 return 0; 2728 2729 if (cpu_migrate(cpu)) { 2730 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2731 return -1; 2732 } 2733 2734 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { 2735 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 2736 return 0; 2737 2738 dts = (msr >> 16) & 0x7F; 2739 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", 2740 cpu, msr, tcc_activation_temp - dts); 2741 2742 #ifdef THERM_DEBUG 2743 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) 2744 return 0; 2745 2746 dts = (msr >> 16) & 0x7F; 2747 dts2 = (msr >> 8) & 0x7F; 2748 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 2749 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 2750 #endif 2751 } 2752 2753 2754 if (do_dts) { 2755 unsigned int resolution; 2756 2757 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 2758 return 0; 2759 2760 dts = (msr >> 16) & 0x7F; 2761 resolution = (msr >> 27) & 0xF; 2762 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", 2763 cpu, msr, tcc_activation_temp - dts, resolution); 2764 2765 #ifdef THERM_DEBUG 2766 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) 2767 return 0; 2768 2769 dts = (msr >> 16) & 0x7F; 2770 dts2 = (msr >> 8) & 0x7F; 2771 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 2772 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 2773 #endif 2774 } 2775 2776 return 0; 2777 } 2778 2779 void print_power_limit_msr(int cpu, unsigned long long msr, char *label) 2780 { 2781 fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n", 2782 cpu, label, 2783 ((msr >> 15) & 1) ? "EN" : "DIS", 2784 ((msr >> 0) & 0x7FFF) * rapl_power_units, 2785 (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, 2786 (((msr >> 16) & 1) ? "EN" : "DIS")); 2787 2788 return; 2789 } 2790 2791 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2792 { 2793 unsigned long long msr; 2794 int cpu; 2795 2796 if (!do_rapl) 2797 return 0; 2798 2799 /* RAPL counters are per package, so print only for 1st thread/package */ 2800 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2801 return 0; 2802 2803 cpu = t->cpu_id; 2804 if (cpu_migrate(cpu)) { 2805 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2806 return -1; 2807 } 2808 2809 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) 2810 return -1; 2811 2812 if (debug) { 2813 fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " 2814 "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, 2815 rapl_power_units, rapl_energy_units, rapl_time_units); 2816 } 2817 if (do_rapl & RAPL_PKG_POWER_INFO) { 2818 2819 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) 2820 return -5; 2821 2822 2823 fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", 2824 cpu, msr, 2825 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2826 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2827 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2828 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 2829 2830 } 2831 if (do_rapl & RAPL_PKG) { 2832 2833 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) 2834 return -9; 2835 2836 fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", 2837 cpu, msr, (msr >> 63) & 1 ? "": "UN"); 2838 2839 print_power_limit_msr(cpu, msr, "PKG Limit #1"); 2840 fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", 2841 cpu, 2842 ((msr >> 47) & 1) ? "EN" : "DIS", 2843 ((msr >> 32) & 0x7FFF) * rapl_power_units, 2844 (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, 2845 ((msr >> 48) & 1) ? "EN" : "DIS"); 2846 } 2847 2848 if (do_rapl & RAPL_DRAM_POWER_INFO) { 2849 if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) 2850 return -6; 2851 2852 fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", 2853 cpu, msr, 2854 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2855 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2856 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2857 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 2858 } 2859 if (do_rapl & RAPL_DRAM) { 2860 if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) 2861 return -9; 2862 fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", 2863 cpu, msr, (msr >> 31) & 1 ? "": "UN"); 2864 2865 print_power_limit_msr(cpu, msr, "DRAM Limit"); 2866 } 2867 if (do_rapl & RAPL_CORE_POLICY) { 2868 if (debug) { 2869 if (get_msr(cpu, MSR_PP0_POLICY, &msr)) 2870 return -7; 2871 2872 fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); 2873 } 2874 } 2875 if (do_rapl & RAPL_CORES) { 2876 if (debug) { 2877 2878 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) 2879 return -9; 2880 fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", 2881 cpu, msr, (msr >> 31) & 1 ? "": "UN"); 2882 print_power_limit_msr(cpu, msr, "Cores Limit"); 2883 } 2884 } 2885 if (do_rapl & RAPL_GFX) { 2886 if (debug) { 2887 if (get_msr(cpu, MSR_PP1_POLICY, &msr)) 2888 return -8; 2889 2890 fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); 2891 2892 if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) 2893 return -9; 2894 fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", 2895 cpu, msr, (msr >> 31) & 1 ? "": "UN"); 2896 print_power_limit_msr(cpu, msr, "GFX Limit"); 2897 } 2898 } 2899 return 0; 2900 } 2901 2902 /* 2903 * SNB adds support for additional MSRs: 2904 * 2905 * MSR_PKG_C7_RESIDENCY 0x000003fa 2906 * MSR_CORE_C7_RESIDENCY 0x000003fe 2907 * MSR_PKG_C2_RESIDENCY 0x0000060d 2908 */ 2909 2910 int has_snb_msrs(unsigned int family, unsigned int model) 2911 { 2912 if (!genuine_intel) 2913 return 0; 2914 2915 switch (model) { 2916 case 0x2A: 2917 case 0x2D: 2918 case 0x3A: /* IVB */ 2919 case 0x3E: /* IVB Xeon */ 2920 case 0x3C: /* HSW */ 2921 case 0x3F: /* HSW */ 2922 case 0x45: /* HSW */ 2923 case 0x46: /* HSW */ 2924 case 0x3D: /* BDW */ 2925 case 0x47: /* BDW */ 2926 case 0x4F: /* BDX */ 2927 case 0x56: /* BDX-DE */ 2928 case 0x4E: /* SKL */ 2929 case 0x5E: /* SKL */ 2930 case 0x8E: /* KBL */ 2931 case 0x9E: /* KBL */ 2932 case 0x55: /* SKX */ 2933 case 0x5C: /* BXT */ 2934 return 1; 2935 } 2936 return 0; 2937 } 2938 2939 /* 2940 * HSW adds support for additional MSRs: 2941 * 2942 * MSR_PKG_C8_RESIDENCY 0x00000630 2943 * MSR_PKG_C9_RESIDENCY 0x00000631 2944 * MSR_PKG_C10_RESIDENCY 0x00000632 2945 * 2946 * MSR_PKGC8_IRTL 0x00000633 2947 * MSR_PKGC9_IRTL 0x00000634 2948 * MSR_PKGC10_IRTL 0x00000635 2949 * 2950 */ 2951 int has_hsw_msrs(unsigned int family, unsigned int model) 2952 { 2953 if (!genuine_intel) 2954 return 0; 2955 2956 switch (model) { 2957 case 0x45: /* HSW */ 2958 case 0x3D: /* BDW */ 2959 case 0x4E: /* SKL */ 2960 case 0x5E: /* SKL */ 2961 case 0x8E: /* KBL */ 2962 case 0x9E: /* KBL */ 2963 case 0x5C: /* BXT */ 2964 return 1; 2965 } 2966 return 0; 2967 } 2968 2969 /* 2970 * SKL adds support for additional MSRS: 2971 * 2972 * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 2973 * MSR_PKG_ANY_CORE_C0_RES 0x00000659 2974 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A 2975 * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B 2976 */ 2977 int has_skl_msrs(unsigned int family, unsigned int model) 2978 { 2979 if (!genuine_intel) 2980 return 0; 2981 2982 switch (model) { 2983 case 0x4E: /* SKL */ 2984 case 0x5E: /* SKL */ 2985 case 0x8E: /* KBL */ 2986 case 0x9E: /* KBL */ 2987 return 1; 2988 } 2989 return 0; 2990 } 2991 2992 2993 2994 int is_slm(unsigned int family, unsigned int model) 2995 { 2996 if (!genuine_intel) 2997 return 0; 2998 switch (model) { 2999 case 0x37: /* BYT */ 3000 case 0x4D: /* AVN */ 3001 return 1; 3002 } 3003 return 0; 3004 } 3005 3006 int is_knl(unsigned int family, unsigned int model) 3007 { 3008 if (!genuine_intel) 3009 return 0; 3010 switch (model) { 3011 case 0x57: /* KNL */ 3012 return 1; 3013 } 3014 return 0; 3015 } 3016 3017 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) 3018 { 3019 if (is_knl(family, model)) 3020 return 1024; 3021 return 1; 3022 } 3023 3024 #define SLM_BCLK_FREQS 5 3025 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 3026 3027 double slm_bclk(void) 3028 { 3029 unsigned long long msr = 3; 3030 unsigned int i; 3031 double freq; 3032 3033 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) 3034 fprintf(outf, "SLM BCLK: unknown\n"); 3035 3036 i = msr & 0xf; 3037 if (i >= SLM_BCLK_FREQS) { 3038 fprintf(outf, "SLM BCLK[%d] invalid\n", i); 3039 msr = 3; 3040 } 3041 freq = slm_freq_table[i]; 3042 3043 fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); 3044 3045 return freq; 3046 } 3047 3048 double discover_bclk(unsigned int family, unsigned int model) 3049 { 3050 if (has_snb_msrs(family, model) || is_knl(family, model)) 3051 return 100.00; 3052 else if (is_slm(family, model)) 3053 return slm_bclk(); 3054 else 3055 return 133.33; 3056 } 3057 3058 /* 3059 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where 3060 * the Thermal Control Circuit (TCC) activates. 3061 * This is usually equal to tjMax. 3062 * 3063 * Older processors do not have this MSR, so there we guess, 3064 * but also allow cmdline over-ride with -T. 3065 * 3066 * Several MSR temperature values are in units of degrees-C 3067 * below this value, including the Digital Thermal Sensor (DTS), 3068 * Package Thermal Management Sensor (PTM), and thermal event thresholds. 3069 */ 3070 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3071 { 3072 unsigned long long msr; 3073 unsigned int target_c_local; 3074 int cpu; 3075 3076 /* tcc_activation_temp is used only for dts or ptm */ 3077 if (!(do_dts || do_ptm)) 3078 return 0; 3079 3080 /* this is a per-package concept */ 3081 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 3082 return 0; 3083 3084 cpu = t->cpu_id; 3085 if (cpu_migrate(cpu)) { 3086 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 3087 return -1; 3088 } 3089 3090 if (tcc_activation_temp_override != 0) { 3091 tcc_activation_temp = tcc_activation_temp_override; 3092 fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n", 3093 cpu, tcc_activation_temp); 3094 return 0; 3095 } 3096 3097 /* Temperature Target MSR is Nehalem and newer only */ 3098 if (!do_nhm_platform_info) 3099 goto guess; 3100 3101 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) 3102 goto guess; 3103 3104 target_c_local = (msr >> 16) & 0xFF; 3105 3106 if (debug) 3107 fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", 3108 cpu, msr, target_c_local); 3109 3110 if (!target_c_local) 3111 goto guess; 3112 3113 tcc_activation_temp = target_c_local; 3114 3115 return 0; 3116 3117 guess: 3118 tcc_activation_temp = TJMAX_DEFAULT; 3119 fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", 3120 cpu, tcc_activation_temp); 3121 3122 return 0; 3123 } 3124 3125 void decode_feature_control_msr(void) 3126 { 3127 unsigned long long msr; 3128 3129 if (!get_msr(base_cpu, MSR_IA32_FEATURE_CONTROL, &msr)) 3130 fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n", 3131 base_cpu, msr, 3132 msr & FEATURE_CONTROL_LOCKED ? "" : "UN-", 3133 msr & (1 << 18) ? "SGX" : ""); 3134 } 3135 3136 void decode_misc_enable_msr(void) 3137 { 3138 unsigned long long msr; 3139 3140 if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr)) 3141 fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%s %s %s)\n", 3142 base_cpu, msr, 3143 msr & (1 << 3) ? "TCC" : "", 3144 msr & (1 << 16) ? "EIST" : "", 3145 msr & (1 << 18) ? "MONITOR" : ""); 3146 } 3147 3148 /* 3149 * Decode MSR_MISC_PWR_MGMT 3150 * 3151 * Decode the bits according to the Nehalem documentation 3152 * bit[0] seems to continue to have same meaning going forward 3153 * bit[1] less so... 3154 */ 3155 void decode_misc_pwr_mgmt_msr(void) 3156 { 3157 unsigned long long msr; 3158 3159 if (!do_nhm_platform_info) 3160 return; 3161 3162 if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) 3163 fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB)\n", 3164 base_cpu, msr, 3165 msr & (1 << 0) ? "DIS" : "EN", 3166 msr & (1 << 1) ? "EN" : "DIS"); 3167 } 3168 3169 void process_cpuid() 3170 { 3171 unsigned int eax, ebx, ecx, edx, max_level, max_extended_level; 3172 unsigned int fms, family, model, stepping; 3173 3174 eax = ebx = ecx = edx = 0; 3175 3176 __cpuid(0, max_level, ebx, ecx, edx); 3177 3178 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 3179 genuine_intel = 1; 3180 3181 if (debug) 3182 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 3183 (char *)&ebx, (char *)&edx, (char *)&ecx); 3184 3185 __cpuid(1, fms, ebx, ecx, edx); 3186 family = (fms >> 8) & 0xf; 3187 model = (fms >> 4) & 0xf; 3188 stepping = fms & 0xf; 3189 if (family == 6 || family == 0xf) 3190 model += ((fms >> 16) & 0xf) << 4; 3191 3192 if (debug) { 3193 fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", 3194 max_level, family, model, stepping, family, model, stepping); 3195 fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n", 3196 ecx & (1 << 0) ? "SSE3" : "-", 3197 ecx & (1 << 3) ? "MONITOR" : "-", 3198 ecx & (1 << 6) ? "SMX" : "-", 3199 ecx & (1 << 7) ? "EIST" : "-", 3200 ecx & (1 << 8) ? "TM2" : "-", 3201 edx & (1 << 4) ? "TSC" : "-", 3202 edx & (1 << 5) ? "MSR" : "-", 3203 edx & (1 << 22) ? "ACPI-TM" : "-", 3204 edx & (1 << 29) ? "TM" : "-"); 3205 } 3206 3207 if (!(edx & (1 << 5))) 3208 errx(1, "CPUID: no MSR"); 3209 3210 /* 3211 * check max extended function levels of CPUID. 3212 * This is needed to check for invariant TSC. 3213 * This check is valid for both Intel and AMD. 3214 */ 3215 ebx = ecx = edx = 0; 3216 __cpuid(0x80000000, max_extended_level, ebx, ecx, edx); 3217 3218 if (max_extended_level >= 0x80000007) { 3219 3220 /* 3221 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 3222 * this check is valid for both Intel and AMD 3223 */ 3224 __cpuid(0x80000007, eax, ebx, ecx, edx); 3225 has_invariant_tsc = edx & (1 << 8); 3226 } 3227 3228 /* 3229 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 3230 * this check is valid for both Intel and AMD 3231 */ 3232 3233 __cpuid(0x6, eax, ebx, ecx, edx); 3234 has_aperf = ecx & (1 << 0); 3235 do_dts = eax & (1 << 0); 3236 do_ptm = eax & (1 << 6); 3237 has_hwp = eax & (1 << 7); 3238 has_hwp_notify = eax & (1 << 8); 3239 has_hwp_activity_window = eax & (1 << 9); 3240 has_hwp_epp = eax & (1 << 10); 3241 has_hwp_pkg = eax & (1 << 11); 3242 has_epb = ecx & (1 << 3); 3243 3244 if (debug) 3245 fprintf(outf, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sHWP, " 3246 "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", 3247 has_aperf ? "" : "No-", 3248 do_dts ? "" : "No-", 3249 do_ptm ? "" : "No-", 3250 has_hwp ? "" : "No-", 3251 has_hwp_notify ? "" : "No-", 3252 has_hwp_activity_window ? "" : "No-", 3253 has_hwp_epp ? "" : "No-", 3254 has_hwp_pkg ? "" : "No-", 3255 has_epb ? "" : "No-"); 3256 3257 if (debug) 3258 decode_misc_enable_msr(); 3259 3260 if (max_level >= 0x7 && debug) { 3261 int has_sgx; 3262 3263 ecx = 0; 3264 3265 __cpuid_count(0x7, 0, eax, ebx, ecx, edx); 3266 3267 has_sgx = ebx & (1 << 2); 3268 fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-"); 3269 3270 if (has_sgx) 3271 decode_feature_control_msr(); 3272 } 3273 3274 if (max_level >= 0x15) { 3275 unsigned int eax_crystal; 3276 unsigned int ebx_tsc; 3277 3278 /* 3279 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz 3280 */ 3281 eax_crystal = ebx_tsc = crystal_hz = edx = 0; 3282 __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx); 3283 3284 if (ebx_tsc != 0) { 3285 3286 if (debug && (ebx != 0)) 3287 fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", 3288 eax_crystal, ebx_tsc, crystal_hz); 3289 3290 if (crystal_hz == 0) 3291 switch(model) { 3292 case 0x4E: /* SKL */ 3293 case 0x5E: /* SKL */ 3294 case 0x8E: /* KBL */ 3295 case 0x9E: /* KBL */ 3296 crystal_hz = 24000000; /* 24.0 MHz */ 3297 break; 3298 case 0x55: /* SKX */ 3299 crystal_hz = 25000000; /* 25.0 MHz */ 3300 break; 3301 case 0x5C: /* BXT */ 3302 crystal_hz = 19200000; /* 19.2 MHz */ 3303 break; 3304 default: 3305 crystal_hz = 0; 3306 } 3307 3308 if (crystal_hz) { 3309 tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; 3310 if (debug) 3311 fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", 3312 tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); 3313 } 3314 } 3315 } 3316 if (max_level >= 0x16) { 3317 unsigned int base_mhz, max_mhz, bus_mhz, edx; 3318 3319 /* 3320 * CPUID 16H Base MHz, Max MHz, Bus MHz 3321 */ 3322 base_mhz = max_mhz = bus_mhz = edx = 0; 3323 3324 __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx); 3325 if (debug) 3326 fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", 3327 base_mhz, max_mhz, bus_mhz); 3328 } 3329 3330 if (has_aperf) 3331 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); 3332 3333 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); 3334 do_snb_cstates = has_snb_msrs(family, model); 3335 do_irtl_snb = has_snb_msrs(family, model); 3336 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); 3337 do_pc3 = (pkg_cstate_limit >= PCL__3); 3338 do_pc6 = (pkg_cstate_limit >= PCL__6); 3339 do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); 3340 do_c8_c9_c10 = has_hsw_msrs(family, model); 3341 do_irtl_hsw = has_hsw_msrs(family, model); 3342 do_skl_residency = has_skl_msrs(family, model); 3343 do_slm_cstates = is_slm(family, model); 3344 do_knl_cstates = is_knl(family, model); 3345 3346 if (debug) 3347 decode_misc_pwr_mgmt_msr(); 3348 3349 rapl_probe(family, model); 3350 perf_limit_reasons_probe(family, model); 3351 3352 if (debug) 3353 dump_cstate_pstate_config_info(family, model); 3354 3355 if (has_skl_msrs(family, model)) 3356 calculate_tsc_tweak(); 3357 3358 do_gfx_rc6_ms = !access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK); 3359 3360 do_gfx_mhz = !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK); 3361 3362 return; 3363 } 3364 3365 void help() 3366 { 3367 fprintf(outf, 3368 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" 3369 "\n" 3370 "Turbostat forks the specified COMMAND and prints statistics\n" 3371 "when COMMAND completes.\n" 3372 "If no COMMAND is specified, turbostat wakes every 5-seconds\n" 3373 "to print statistics, until interrupted.\n" 3374 "--debug run in \"debug\" mode\n" 3375 "--interval sec Override default 5-second measurement interval\n" 3376 "--help print this help message\n" 3377 "--counter msr print 32-bit counter at address \"msr\"\n" 3378 "--Counter msr print 64-bit Counter at address \"msr\"\n" 3379 "--out file create or truncate \"file\" for all output\n" 3380 "--msr msr print 32-bit value at address \"msr\"\n" 3381 "--MSR msr print 64-bit Value at address \"msr\"\n" 3382 "--version print version information\n" 3383 "\n" 3384 "For more help, run \"man turbostat\"\n"); 3385 } 3386 3387 3388 /* 3389 * in /dev/cpu/ return success for names that are numbers 3390 * ie. filter out ".", "..", "microcode". 3391 */ 3392 int dir_filter(const struct dirent *dirp) 3393 { 3394 if (isdigit(dirp->d_name[0])) 3395 return 1; 3396 else 3397 return 0; 3398 } 3399 3400 int open_dev_cpu_msr(int dummy1) 3401 { 3402 return 0; 3403 } 3404 3405 void topology_probe() 3406 { 3407 int i; 3408 int max_core_id = 0; 3409 int max_package_id = 0; 3410 int max_siblings = 0; 3411 struct cpu_topology { 3412 int core_id; 3413 int physical_package_id; 3414 } *cpus; 3415 3416 /* Initialize num_cpus, max_cpu_num */ 3417 topo.num_cpus = 0; 3418 topo.max_cpu_num = 0; 3419 for_all_proc_cpus(count_cpus); 3420 if (!summary_only && topo.num_cpus > 1) 3421 show_cpu = 1; 3422 3423 if (debug > 1) 3424 fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); 3425 3426 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); 3427 if (cpus == NULL) 3428 err(1, "calloc cpus"); 3429 3430 /* 3431 * Allocate and initialize cpu_present_set 3432 */ 3433 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); 3434 if (cpu_present_set == NULL) 3435 err(3, "CPU_ALLOC"); 3436 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 3437 CPU_ZERO_S(cpu_present_setsize, cpu_present_set); 3438 for_all_proc_cpus(mark_cpu_present); 3439 3440 /* 3441 * Allocate and initialize cpu_affinity_set 3442 */ 3443 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); 3444 if (cpu_affinity_set == NULL) 3445 err(3, "CPU_ALLOC"); 3446 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 3447 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 3448 3449 3450 /* 3451 * For online cpus 3452 * find max_core_id, max_package_id 3453 */ 3454 for (i = 0; i <= topo.max_cpu_num; ++i) { 3455 int siblings; 3456 3457 if (cpu_is_not_present(i)) { 3458 if (debug > 1) 3459 fprintf(outf, "cpu%d NOT PRESENT\n", i); 3460 continue; 3461 } 3462 cpus[i].core_id = get_core_id(i); 3463 if (cpus[i].core_id > max_core_id) 3464 max_core_id = cpus[i].core_id; 3465 3466 cpus[i].physical_package_id = get_physical_package_id(i); 3467 if (cpus[i].physical_package_id > max_package_id) 3468 max_package_id = cpus[i].physical_package_id; 3469 3470 siblings = get_num_ht_siblings(i); 3471 if (siblings > max_siblings) 3472 max_siblings = siblings; 3473 if (debug > 1) 3474 fprintf(outf, "cpu %d pkg %d core %d\n", 3475 i, cpus[i].physical_package_id, cpus[i].core_id); 3476 } 3477 topo.num_cores_per_pkg = max_core_id + 1; 3478 if (debug > 1) 3479 fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", 3480 max_core_id, topo.num_cores_per_pkg); 3481 if (debug && !summary_only && topo.num_cores_per_pkg > 1) 3482 show_core = 1; 3483 3484 topo.num_packages = max_package_id + 1; 3485 if (debug > 1) 3486 fprintf(outf, "max_package_id %d, sizing for %d packages\n", 3487 max_package_id, topo.num_packages); 3488 if (debug && !summary_only && topo.num_packages > 1) 3489 show_pkg = 1; 3490 3491 topo.num_threads_per_core = max_siblings; 3492 if (debug > 1) 3493 fprintf(outf, "max_siblings %d\n", max_siblings); 3494 3495 free(cpus); 3496 } 3497 3498 void 3499 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) 3500 { 3501 int i; 3502 3503 *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * 3504 topo.num_packages, sizeof(struct thread_data)); 3505 if (*t == NULL) 3506 goto error; 3507 3508 for (i = 0; i < topo.num_threads_per_core * 3509 topo.num_cores_per_pkg * topo.num_packages; i++) 3510 (*t)[i].cpu_id = -1; 3511 3512 *c = calloc(topo.num_cores_per_pkg * topo.num_packages, 3513 sizeof(struct core_data)); 3514 if (*c == NULL) 3515 goto error; 3516 3517 for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) 3518 (*c)[i].core_id = -1; 3519 3520 *p = calloc(topo.num_packages, sizeof(struct pkg_data)); 3521 if (*p == NULL) 3522 goto error; 3523 3524 for (i = 0; i < topo.num_packages; i++) 3525 (*p)[i].package_id = i; 3526 3527 return; 3528 error: 3529 err(1, "calloc counters"); 3530 } 3531 /* 3532 * init_counter() 3533 * 3534 * set cpu_id, core_num, pkg_num 3535 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE 3536 * 3537 * increment topo.num_cores when 1st core in pkg seen 3538 */ 3539 void init_counter(struct thread_data *thread_base, struct core_data *core_base, 3540 struct pkg_data *pkg_base, int thread_num, int core_num, 3541 int pkg_num, int cpu_id) 3542 { 3543 struct thread_data *t; 3544 struct core_data *c; 3545 struct pkg_data *p; 3546 3547 t = GET_THREAD(thread_base, thread_num, core_num, pkg_num); 3548 c = GET_CORE(core_base, core_num, pkg_num); 3549 p = GET_PKG(pkg_base, pkg_num); 3550 3551 t->cpu_id = cpu_id; 3552 if (thread_num == 0) { 3553 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; 3554 if (cpu_is_first_core_in_package(cpu_id)) 3555 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; 3556 } 3557 3558 c->core_id = core_num; 3559 p->package_id = pkg_num; 3560 } 3561 3562 3563 int initialize_counters(int cpu_id) 3564 { 3565 int my_thread_id, my_core_id, my_package_id; 3566 3567 my_package_id = get_physical_package_id(cpu_id); 3568 my_core_id = get_core_id(cpu_id); 3569 my_thread_id = get_cpu_position_in_core(cpu_id); 3570 if (!my_thread_id) 3571 topo.num_cores++; 3572 3573 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 3574 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 3575 return 0; 3576 } 3577 3578 void allocate_output_buffer() 3579 { 3580 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 3581 outp = output_buffer; 3582 if (outp == NULL) 3583 err(-1, "calloc output buffer"); 3584 } 3585 void allocate_fd_percpu(void) 3586 { 3587 fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); 3588 if (fd_percpu == NULL) 3589 err(-1, "calloc fd_percpu"); 3590 } 3591 void allocate_irq_buffers(void) 3592 { 3593 irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int)); 3594 if (irq_column_2_cpu == NULL) 3595 err(-1, "calloc %d", topo.num_cpus); 3596 3597 irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int)); 3598 if (irqs_per_cpu == NULL) 3599 err(-1, "calloc %d", topo.max_cpu_num + 1); 3600 } 3601 void setup_all_buffers(void) 3602 { 3603 topology_probe(); 3604 allocate_irq_buffers(); 3605 allocate_fd_percpu(); 3606 allocate_counters(&thread_even, &core_even, &package_even); 3607 allocate_counters(&thread_odd, &core_odd, &package_odd); 3608 allocate_output_buffer(); 3609 for_all_proc_cpus(initialize_counters); 3610 } 3611 3612 void set_base_cpu(void) 3613 { 3614 base_cpu = sched_getcpu(); 3615 if (base_cpu < 0) 3616 err(-ENODEV, "No valid cpus found"); 3617 3618 if (debug > 1) 3619 fprintf(outf, "base_cpu = %d\n", base_cpu); 3620 } 3621 3622 void turbostat_init() 3623 { 3624 setup_all_buffers(); 3625 set_base_cpu(); 3626 check_dev_msr(); 3627 check_permissions(); 3628 process_cpuid(); 3629 3630 3631 if (debug) 3632 for_all_cpus(print_hwp, ODD_COUNTERS); 3633 3634 if (debug) 3635 for_all_cpus(print_epb, ODD_COUNTERS); 3636 3637 if (debug) 3638 for_all_cpus(print_perf_limit, ODD_COUNTERS); 3639 3640 if (debug) 3641 for_all_cpus(print_rapl, ODD_COUNTERS); 3642 3643 for_all_cpus(set_temperature_target, ODD_COUNTERS); 3644 3645 if (debug) 3646 for_all_cpus(print_thermal, ODD_COUNTERS); 3647 3648 if (debug && do_irtl_snb) 3649 print_irtl(); 3650 } 3651 3652 int fork_it(char **argv) 3653 { 3654 pid_t child_pid; 3655 int status; 3656 3657 status = for_all_cpus(get_counters, EVEN_COUNTERS); 3658 if (status) 3659 exit(status); 3660 /* clear affinity side-effect of get_counters() */ 3661 sched_setaffinity(0, cpu_present_setsize, cpu_present_set); 3662 gettimeofday(&tv_even, (struct timezone *)NULL); 3663 3664 child_pid = fork(); 3665 if (!child_pid) { 3666 /* child */ 3667 execvp(argv[0], argv); 3668 } else { 3669 3670 /* parent */ 3671 if (child_pid == -1) 3672 err(1, "fork"); 3673 3674 signal(SIGINT, SIG_IGN); 3675 signal(SIGQUIT, SIG_IGN); 3676 if (waitpid(child_pid, &status, 0) == -1) 3677 err(status, "waitpid"); 3678 } 3679 /* 3680 * n.b. fork_it() does not check for errors from for_all_cpus() 3681 * because re-starting is problematic when forking 3682 */ 3683 for_all_cpus(get_counters, ODD_COUNTERS); 3684 gettimeofday(&tv_odd, (struct timezone *)NULL); 3685 timersub(&tv_odd, &tv_even, &tv_delta); 3686 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) 3687 fprintf(outf, "%s: Counter reset detected\n", progname); 3688 else { 3689 compute_average(EVEN_COUNTERS); 3690 format_all_counters(EVEN_COUNTERS); 3691 } 3692 3693 fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); 3694 3695 flush_output_stderr(); 3696 3697 return status; 3698 } 3699 3700 int get_and_dump_counters(void) 3701 { 3702 int status; 3703 3704 status = for_all_cpus(get_counters, ODD_COUNTERS); 3705 if (status) 3706 return status; 3707 3708 status = for_all_cpus(dump_counters, ODD_COUNTERS); 3709 if (status) 3710 return status; 3711 3712 flush_output_stdout(); 3713 3714 return status; 3715 } 3716 3717 void print_version() { 3718 fprintf(outf, "turbostat version 4.14 22 Apr 2016" 3719 " - Len Brown <lenb@kernel.org>\n"); 3720 } 3721 3722 void cmdline(int argc, char **argv) 3723 { 3724 int opt; 3725 int option_index = 0; 3726 static struct option long_options[] = { 3727 {"Counter", required_argument, 0, 'C'}, 3728 {"counter", required_argument, 0, 'c'}, 3729 {"Dump", no_argument, 0, 'D'}, 3730 {"debug", no_argument, 0, 'd'}, 3731 {"interval", required_argument, 0, 'i'}, 3732 {"help", no_argument, 0, 'h'}, 3733 {"Joules", no_argument, 0, 'J'}, 3734 {"MSR", required_argument, 0, 'M'}, 3735 {"msr", required_argument, 0, 'm'}, 3736 {"out", required_argument, 0, 'o'}, 3737 {"Package", no_argument, 0, 'p'}, 3738 {"processor", no_argument, 0, 'p'}, 3739 {"Summary", no_argument, 0, 'S'}, 3740 {"TCC", required_argument, 0, 'T'}, 3741 {"version", no_argument, 0, 'v' }, 3742 {0, 0, 0, 0 } 3743 }; 3744 3745 progname = argv[0]; 3746 3747 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:PpST:v", 3748 long_options, &option_index)) != -1) { 3749 switch (opt) { 3750 case 'C': 3751 sscanf(optarg, "%x", &extra_delta_offset64); 3752 break; 3753 case 'c': 3754 sscanf(optarg, "%x", &extra_delta_offset32); 3755 break; 3756 case 'D': 3757 dump_only++; 3758 break; 3759 case 'd': 3760 debug++; 3761 break; 3762 case 'h': 3763 default: 3764 help(); 3765 exit(1); 3766 case 'i': 3767 { 3768 double interval = strtod(optarg, NULL); 3769 3770 if (interval < 0.001) { 3771 fprintf(outf, "interval %f seconds is too small\n", 3772 interval); 3773 exit(2); 3774 } 3775 3776 interval_ts.tv_sec = interval; 3777 interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000; 3778 } 3779 break; 3780 case 'J': 3781 rapl_joules++; 3782 break; 3783 case 'M': 3784 sscanf(optarg, "%x", &extra_msr_offset64); 3785 break; 3786 case 'm': 3787 sscanf(optarg, "%x", &extra_msr_offset32); 3788 break; 3789 case 'o': 3790 outf = fopen_or_die(optarg, "w"); 3791 break; 3792 case 'P': 3793 show_pkg_only++; 3794 break; 3795 case 'p': 3796 show_core_only++; 3797 break; 3798 case 'S': 3799 summary_only++; 3800 break; 3801 case 'T': 3802 tcc_activation_temp_override = atoi(optarg); 3803 break; 3804 case 'v': 3805 print_version(); 3806 exit(0); 3807 break; 3808 } 3809 } 3810 } 3811 3812 int main(int argc, char **argv) 3813 { 3814 outf = stderr; 3815 3816 cmdline(argc, argv); 3817 3818 if (debug) 3819 print_version(); 3820 3821 turbostat_init(); 3822 3823 /* dump counters and exit */ 3824 if (dump_only) 3825 return get_and_dump_counters(); 3826 3827 /* 3828 * if any params left, it must be a command to fork 3829 */ 3830 if (argc - optind) 3831 return fork_it(argv + optind); 3832 else 3833 turbostat_loop(); 3834 3835 return 0; 3836 } 3837