1 /* 2 * turbostat -- show CPU frequency and C-state residency 3 * on modern Intel turbo-capable processors. 4 * 5 * Copyright (c) 2013 Intel Corporation. 6 * Len Brown <len.brown@intel.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms and conditions of the GNU General Public License, 10 * version 2, as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 * more details. 16 * 17 * You should have received a copy of the GNU General Public License along with 18 * this program; if not, write to the Free Software Foundation, Inc., 19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 */ 21 22 #define _GNU_SOURCE 23 #include MSRHEADER 24 #include <stdarg.h> 25 #include <stdio.h> 26 #include <err.h> 27 #include <unistd.h> 28 #include <sys/types.h> 29 #include <sys/wait.h> 30 #include <sys/stat.h> 31 #include <sys/resource.h> 32 #include <fcntl.h> 33 #include <signal.h> 34 #include <sys/time.h> 35 #include <stdlib.h> 36 #include <getopt.h> 37 #include <dirent.h> 38 #include <string.h> 39 #include <ctype.h> 40 #include <sched.h> 41 #include <time.h> 42 #include <cpuid.h> 43 #include <linux/capability.h> 44 #include <errno.h> 45 46 char *proc_stat = "/proc/stat"; 47 FILE *outf; 48 int *fd_percpu; 49 struct timespec interval_ts = {5, 0}; 50 unsigned int debug; 51 unsigned int rapl_joules; 52 unsigned int summary_only; 53 unsigned int dump_only; 54 unsigned int skip_c0; 55 unsigned int skip_c1; 56 unsigned int do_nhm_cstates; 57 unsigned int do_snb_cstates; 58 unsigned int do_knl_cstates; 59 unsigned int do_pc2; 60 unsigned int do_pc3; 61 unsigned int do_pc6; 62 unsigned int do_pc7; 63 unsigned int do_c8_c9_c10; 64 unsigned int do_skl_residency; 65 unsigned int do_slm_cstates; 66 unsigned int use_c1_residency_msr; 67 unsigned int has_aperf; 68 unsigned int has_epb; 69 unsigned int do_irtl_snb; 70 unsigned int do_irtl_hsw; 71 unsigned int units = 1000000; /* MHz etc */ 72 unsigned int genuine_intel; 73 unsigned int has_invariant_tsc; 74 unsigned int do_nhm_platform_info; 75 unsigned int extra_msr_offset32; 76 unsigned int extra_msr_offset64; 77 unsigned int extra_delta_offset32; 78 unsigned int extra_delta_offset64; 79 unsigned int aperf_mperf_multiplier = 1; 80 int do_irq = 1; 81 int do_smi; 82 double bclk; 83 double base_hz; 84 unsigned int has_base_hz; 85 double tsc_tweak = 1.0; 86 unsigned int show_pkg; 87 unsigned int show_core; 88 unsigned int show_cpu; 89 unsigned int show_pkg_only; 90 unsigned int show_core_only; 91 char *output_buffer, *outp; 92 unsigned int do_rapl; 93 unsigned int do_dts; 94 unsigned int do_ptm; 95 unsigned int do_gfx_rc6_ms; 96 unsigned long long gfx_cur_rc6_ms; 97 unsigned int do_gfx_mhz; 98 unsigned int gfx_cur_mhz; 99 unsigned int tcc_activation_temp; 100 unsigned int tcc_activation_temp_override; 101 double rapl_power_units, rapl_time_units; 102 double rapl_dram_energy_units, rapl_energy_units; 103 double rapl_joule_counter_range; 104 unsigned int do_core_perf_limit_reasons; 105 unsigned int do_gfx_perf_limit_reasons; 106 unsigned int do_ring_perf_limit_reasons; 107 unsigned int crystal_hz; 108 unsigned long long tsc_hz; 109 int base_cpu; 110 double discover_bclk(unsigned int family, unsigned int model); 111 unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ 112 /* 
IA32_HWP_REQUEST, IA32_HWP_STATUS */ 113 unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ 114 unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ 115 unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ 116 unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 117 118 #define RAPL_PKG (1 << 0) 119 /* 0x610 MSR_PKG_POWER_LIMIT */ 120 /* 0x611 MSR_PKG_ENERGY_STATUS */ 121 #define RAPL_PKG_PERF_STATUS (1 << 1) 122 /* 0x613 MSR_PKG_PERF_STATUS */ 123 #define RAPL_PKG_POWER_INFO (1 << 2) 124 /* 0x614 MSR_PKG_POWER_INFO */ 125 126 #define RAPL_DRAM (1 << 3) 127 /* 0x618 MSR_DRAM_POWER_LIMIT */ 128 /* 0x619 MSR_DRAM_ENERGY_STATUS */ 129 #define RAPL_DRAM_PERF_STATUS (1 << 4) 130 /* 0x61b MSR_DRAM_PERF_STATUS */ 131 #define RAPL_DRAM_POWER_INFO (1 << 5) 132 /* 0x61c MSR_DRAM_POWER_INFO */ 133 134 #define RAPL_CORES (1 << 6) 135 /* 0x638 MSR_PP0_POWER_LIMIT */ 136 /* 0x639 MSR_PP0_ENERGY_STATUS */ 137 #define RAPL_CORE_POLICY (1 << 7) 138 /* 0x63a MSR_PP0_POLICY */ 139 140 #define RAPL_GFX (1 << 8) 141 /* 0x640 MSR_PP1_POWER_LIMIT */ 142 /* 0x641 MSR_PP1_ENERGY_STATUS */ 143 /* 0x642 MSR_PP1_POLICY */ 144 #define TJMAX_DEFAULT 100 145 146 #define MAX(a, b) ((a) > (b) ? (a) : (b)) 147 148 int aperf_mperf_unstable; 149 int backwards_count; 150 char *progname; 151 152 cpu_set_t *cpu_present_set, *cpu_affinity_set; 153 size_t cpu_present_setsize, cpu_affinity_setsize; 154 155 struct thread_data { 156 unsigned long long tsc; 157 unsigned long long aperf; 158 unsigned long long mperf; 159 unsigned long long c1; 160 unsigned long long extra_msr64; 161 unsigned long long extra_delta64; 162 unsigned long long extra_msr32; 163 unsigned long long extra_delta32; 164 unsigned int irq_count; 165 unsigned int smi_count; 166 unsigned int cpu_id; 167 unsigned int flags; 168 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 169 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 170 } *thread_even, *thread_odd; 171 172 struct core_data { 173 unsigned long long c3; 174 unsigned long long c6; 175 unsigned long long c7; 176 unsigned int core_temp_c; 177 unsigned int core_id; 178 } *core_even, *core_odd; 179 180 struct pkg_data { 181 unsigned long long pc2; 182 unsigned long long pc3; 183 unsigned long long pc6; 184 unsigned long long pc7; 185 unsigned long long pc8; 186 unsigned long long pc9; 187 unsigned long long pc10; 188 unsigned long long pkg_wtd_core_c0; 189 unsigned long long pkg_any_core_c0; 190 unsigned long long pkg_any_gfxe_c0; 191 unsigned long long pkg_both_core_gfxe_c0; 192 unsigned long long gfx_rc6_ms; 193 unsigned int gfx_mhz; 194 unsigned int package_id; 195 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ 196 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ 197 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ 198 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ 199 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ 200 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ 201 unsigned int pkg_temp_c; 202 203 } *package_even, *package_odd; 204 205 #define ODD_COUNTERS thread_odd, core_odd, package_odd 206 #define EVEN_COUNTERS thread_even, core_even, package_even 207 208 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \ 209 (thread_base + (pkg_no) * topo.num_cores_per_pkg * \ 210 topo.num_threads_per_core + \ 211 (core_no) * topo.num_threads_per_core + (thread_no)) 212 #define GET_CORE(core_base, core_no, pkg_no) \ 213 (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no)) 214 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) 215 
216 struct system_summary { 217 struct thread_data threads; 218 struct core_data cores; 219 struct pkg_data packages; 220 } sum, average; 221 222 223 struct topo_params { 224 int num_packages; 225 int num_cpus; 226 int num_cores; 227 int max_cpu_num; 228 int num_cores_per_pkg; 229 int num_threads_per_core; 230 } topo; 231 232 struct timeval tv_even, tv_odd, tv_delta; 233 234 int *irq_column_2_cpu; /* /proc/interrupts column numbers */ 235 int *irqs_per_cpu; /* indexed by cpu_num */ 236 237 void setup_all_buffers(void); 238 239 int cpu_is_not_present(int cpu) 240 { 241 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); 242 } 243 /* 244 * run func(thread, core, package) in topology order 245 * skip non-present cpus 246 */ 247 248 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), 249 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) 250 { 251 int retval, pkg_no, core_no, thread_no; 252 253 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 254 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { 255 for (thread_no = 0; thread_no < 256 topo.num_threads_per_core; ++thread_no) { 257 struct thread_data *t; 258 struct core_data *c; 259 struct pkg_data *p; 260 261 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); 262 263 if (cpu_is_not_present(t->cpu_id)) 264 continue; 265 266 c = GET_CORE(core_base, core_no, pkg_no); 267 p = GET_PKG(pkg_base, pkg_no); 268 269 retval = func(t, c, p); 270 if (retval) 271 return retval; 272 } 273 } 274 } 275 return 0; 276 } 277 278 int cpu_migrate(int cpu) 279 { 280 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 281 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); 282 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) 283 return -1; 284 else 285 return 0; 286 } 287 int get_msr_fd(int cpu) 288 { 289 char pathname[32]; 290 int fd; 291 292 fd = fd_percpu[cpu]; 293 294 if (fd) 295 return fd; 296 297 sprintf(pathname, "/dev/cpu/%d/msr", cpu); 298 fd = open(pathname, O_RDONLY); 299 if (fd < 0) 300 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname); 301 302 fd_percpu[cpu] = fd; 303 304 return fd; 305 } 306 307 int get_msr(int cpu, off_t offset, unsigned long long *msr) 308 { 309 ssize_t retval; 310 311 retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset); 312 313 if (retval != sizeof *msr) 314 err(-1, "msr %d offset 0x%llx read failed", cpu, (unsigned long long)offset); 315 316 return 0; 317 } 318 319 /* 320 * Example Format w/ field column widths: 321 * 322 * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz IRQ SMI Busy% CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp PkgTmp GFXMHz Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt 323 * 12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 324 */ 325 326 void print_header(void) 327 { 328 if (show_pkg) 329 outp += sprintf(outp, " Package"); 330 if (show_core) 331 outp += sprintf(outp, " Core"); 332 if (show_cpu) 333 outp += sprintf(outp, " CPU"); 334 if (has_aperf) 335 outp += sprintf(outp, " Avg_MHz"); 336 if (has_aperf) 337 outp += sprintf(outp, " Busy%%"); 338 if (has_aperf) 339 outp += sprintf(outp, " Bzy_MHz"); 340 outp += sprintf(outp, " TSC_MHz"); 341 342 if (extra_delta_offset32) 343 outp += sprintf(outp, " count 0x%03X", extra_delta_offset32); 344 if (extra_delta_offset64) 345 outp += sprintf(outp, " COUNT 
0x%03X", extra_delta_offset64); 346 if (extra_msr_offset32) 347 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32); 348 if (extra_msr_offset64) 349 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64); 350 351 if (!debug) 352 goto done; 353 354 if (do_irq) 355 outp += sprintf(outp, " IRQ"); 356 if (do_smi) 357 outp += sprintf(outp, " SMI"); 358 359 if (do_nhm_cstates) 360 outp += sprintf(outp, " CPU%%c1"); 361 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) 362 outp += sprintf(outp, " CPU%%c3"); 363 if (do_nhm_cstates) 364 outp += sprintf(outp, " CPU%%c6"); 365 if (do_snb_cstates) 366 outp += sprintf(outp, " CPU%%c7"); 367 368 if (do_dts) 369 outp += sprintf(outp, " CoreTmp"); 370 if (do_ptm) 371 outp += sprintf(outp, " PkgTmp"); 372 373 if (do_gfx_rc6_ms) 374 outp += sprintf(outp, " GFX%%rc6"); 375 376 if (do_gfx_mhz) 377 outp += sprintf(outp, " GFXMHz"); 378 379 if (do_skl_residency) { 380 outp += sprintf(outp, " Totl%%C0"); 381 outp += sprintf(outp, " Any%%C0"); 382 outp += sprintf(outp, " GFX%%C0"); 383 outp += sprintf(outp, " CPUGFX%%"); 384 } 385 386 if (do_pc2) 387 outp += sprintf(outp, " Pkg%%pc2"); 388 if (do_pc3) 389 outp += sprintf(outp, " Pkg%%pc3"); 390 if (do_pc6) 391 outp += sprintf(outp, " Pkg%%pc6"); 392 if (do_pc7) 393 outp += sprintf(outp, " Pkg%%pc7"); 394 if (do_c8_c9_c10) { 395 outp += sprintf(outp, " Pkg%%pc8"); 396 outp += sprintf(outp, " Pkg%%pc9"); 397 outp += sprintf(outp, " Pk%%pc10"); 398 } 399 400 if (do_rapl && !rapl_joules) { 401 if (do_rapl & RAPL_PKG) 402 outp += sprintf(outp, " PkgWatt"); 403 if (do_rapl & RAPL_CORES) 404 outp += sprintf(outp, " CorWatt"); 405 if (do_rapl & RAPL_GFX) 406 outp += sprintf(outp, " GFXWatt"); 407 if (do_rapl & RAPL_DRAM) 408 outp += sprintf(outp, " RAMWatt"); 409 if (do_rapl & RAPL_PKG_PERF_STATUS) 410 outp += sprintf(outp, " PKG_%%"); 411 if (do_rapl & RAPL_DRAM_PERF_STATUS) 412 outp += sprintf(outp, " RAM_%%"); 413 } else if (do_rapl && rapl_joules) { 414 if (do_rapl & RAPL_PKG) 415 outp += sprintf(outp, " Pkg_J"); 416 if (do_rapl & RAPL_CORES) 417 outp += sprintf(outp, " Cor_J"); 418 if (do_rapl & RAPL_GFX) 419 outp += sprintf(outp, " GFX_J"); 420 if (do_rapl & RAPL_DRAM) 421 outp += sprintf(outp, " RAM_J"); 422 if (do_rapl & RAPL_PKG_PERF_STATUS) 423 outp += sprintf(outp, " PKG_%%"); 424 if (do_rapl & RAPL_DRAM_PERF_STATUS) 425 outp += sprintf(outp, " RAM_%%"); 426 outp += sprintf(outp, " time"); 427 428 } 429 done: 430 outp += sprintf(outp, "\n"); 431 } 432 433 int dump_counters(struct thread_data *t, struct core_data *c, 434 struct pkg_data *p) 435 { 436 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); 437 438 if (t) { 439 outp += sprintf(outp, "CPU: %d flags 0x%x\n", 440 t->cpu_id, t->flags); 441 outp += sprintf(outp, "TSC: %016llX\n", t->tsc); 442 outp += sprintf(outp, "aperf: %016llX\n", t->aperf); 443 outp += sprintf(outp, "mperf: %016llX\n", t->mperf); 444 outp += sprintf(outp, "c1: %016llX\n", t->c1); 445 outp += sprintf(outp, "msr0x%x: %08llX\n", 446 extra_delta_offset32, t->extra_delta32); 447 outp += sprintf(outp, "msr0x%x: %016llX\n", 448 extra_delta_offset64, t->extra_delta64); 449 outp += sprintf(outp, "msr0x%x: %08llX\n", 450 extra_msr_offset32, t->extra_msr32); 451 outp += sprintf(outp, "msr0x%x: %016llX\n", 452 extra_msr_offset64, t->extra_msr64); 453 if (do_irq) 454 outp += sprintf(outp, "IRQ: %08X\n", t->irq_count); 455 if (do_smi) 456 outp += sprintf(outp, "SMI: %08X\n", t->smi_count); 457 } 458 459 if (c) { 460 outp += sprintf(outp, "core: %d\n", c->core_id); 461 outp += 
sprintf(outp, "c3: %016llX\n", c->c3); 462 outp += sprintf(outp, "c6: %016llX\n", c->c6); 463 outp += sprintf(outp, "c7: %016llX\n", c->c7); 464 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); 465 } 466 467 if (p) { 468 outp += sprintf(outp, "package: %d\n", p->package_id); 469 470 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0); 471 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0); 472 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0); 473 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); 474 475 outp += sprintf(outp, "pc2: %016llX\n", p->pc2); 476 if (do_pc3) 477 outp += sprintf(outp, "pc3: %016llX\n", p->pc3); 478 if (do_pc6) 479 outp += sprintf(outp, "pc6: %016llX\n", p->pc6); 480 if (do_pc7) 481 outp += sprintf(outp, "pc7: %016llX\n", p->pc7); 482 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 483 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 484 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 485 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 486 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores); 487 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx); 488 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram); 489 outp += sprintf(outp, "Throttle PKG: %0X\n", 490 p->rapl_pkg_perf_status); 491 outp += sprintf(outp, "Throttle RAM: %0X\n", 492 p->rapl_dram_perf_status); 493 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); 494 } 495 496 outp += sprintf(outp, "\n"); 497 498 return 0; 499 } 500 501 /* 502 * column formatting convention & formats 503 */ 504 int format_counters(struct thread_data *t, struct core_data *c, 505 struct pkg_data *p) 506 { 507 double interval_float; 508 char *fmt8; 509 510 /* if showing only 1st thread in core and this isn't one, bail out */ 511 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 512 return 0; 513 514 /* if showing only 1st thread in pkg and this isn't one, bail out */ 515 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 516 return 0; 517 518 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 519 520 /* topo columns, print blanks on 1st (average) line */ 521 if (t == &average.threads) { 522 if (show_pkg) 523 outp += sprintf(outp, " -"); 524 if (show_core) 525 outp += sprintf(outp, " -"); 526 if (show_cpu) 527 outp += sprintf(outp, " -"); 528 } else { 529 if (show_pkg) { 530 if (p) 531 outp += sprintf(outp, "%8d", p->package_id); 532 else 533 outp += sprintf(outp, " -"); 534 } 535 if (show_core) { 536 if (c) 537 outp += sprintf(outp, "%8d", c->core_id); 538 else 539 outp += sprintf(outp, " -"); 540 } 541 if (show_cpu) 542 outp += sprintf(outp, "%8d", t->cpu_id); 543 } 544 545 /* Avg_MHz */ 546 if (has_aperf) 547 outp += sprintf(outp, "%8.0f", 548 1.0 / units * t->aperf / interval_float); 549 550 /* Busy% */ 551 if (has_aperf) { 552 if (!skip_c0) 553 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); 554 else 555 outp += sprintf(outp, "********"); 556 } 557 558 /* Bzy_MHz */ 559 if (has_aperf) { 560 if (has_base_hz) 561 outp += sprintf(outp, "%8.0f", base_hz / units * t->aperf / t->mperf); 562 else 563 outp += sprintf(outp, "%8.0f", 564 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); 565 } 566 567 /* TSC_MHz */ 568 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); 569 570 /* delta */ 571 if (extra_delta_offset32) 572 outp += sprintf(outp, " %11llu", t->extra_delta32); 573 574 /* DELTA */ 575 if (extra_delta_offset64) 576 outp += 
sprintf(outp, " %11llu", t->extra_delta64); 577 /* msr */ 578 if (extra_msr_offset32) 579 outp += sprintf(outp, " 0x%08llx", t->extra_msr32); 580 581 /* MSR */ 582 if (extra_msr_offset64) 583 outp += sprintf(outp, " 0x%016llx", t->extra_msr64); 584 585 if (!debug) 586 goto done; 587 588 /* IRQ */ 589 if (do_irq) 590 outp += sprintf(outp, "%8d", t->irq_count); 591 592 /* SMI */ 593 if (do_smi) 594 outp += sprintf(outp, "%8d", t->smi_count); 595 596 if (do_nhm_cstates) { 597 if (!skip_c1) 598 outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc); 599 else 600 outp += sprintf(outp, "********"); 601 } 602 603 /* print per-core data only for 1st thread in core */ 604 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 605 goto done; 606 607 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) 608 outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); 609 if (do_nhm_cstates) 610 outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); 611 if (do_snb_cstates) 612 outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc); 613 614 if (do_dts) 615 outp += sprintf(outp, "%8d", c->core_temp_c); 616 617 /* print per-package data only for 1st core in package */ 618 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 619 goto done; 620 621 /* PkgTmp */ 622 if (do_ptm) 623 outp += sprintf(outp, "%8d", p->pkg_temp_c); 624 625 /* GFXrc6 */ 626 if (do_gfx_rc6_ms) 627 outp += sprintf(outp, "%8.2f", 100.0 * p->gfx_rc6_ms / 1000.0 / interval_float); 628 629 /* GFXMHz */ 630 if (do_gfx_mhz) 631 outp += sprintf(outp, "%8d", p->gfx_mhz); 632 633 /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ 634 if (do_skl_residency) { 635 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc); 636 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc); 637 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc); 638 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc); 639 } 640 641 if (do_pc2) 642 outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc); 643 if (do_pc3) 644 outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc); 645 if (do_pc6) 646 outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc); 647 if (do_pc7) 648 outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc); 649 if (do_c8_c9_c10) { 650 outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc); 651 outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc); 652 outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc); 653 } 654 655 /* 656 * If measurement interval exceeds minimum RAPL Joule Counter range, 657 * indicate that results are suspect by printing "**" in fraction place. 
658 */ 659 if (interval_float < rapl_joule_counter_range) 660 fmt8 = "%8.2f"; 661 else 662 fmt8 = " %6.0f**"; 663 664 if (do_rapl && !rapl_joules) { 665 if (do_rapl & RAPL_PKG) 666 outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float); 667 if (do_rapl & RAPL_CORES) 668 outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float); 669 if (do_rapl & RAPL_GFX) 670 outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float); 671 if (do_rapl & RAPL_DRAM) 672 outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float); 673 if (do_rapl & RAPL_PKG_PERF_STATUS) 674 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 675 if (do_rapl & RAPL_DRAM_PERF_STATUS) 676 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 677 } else if (do_rapl && rapl_joules) { 678 if (do_rapl & RAPL_PKG) 679 outp += sprintf(outp, fmt8, 680 p->energy_pkg * rapl_energy_units); 681 if (do_rapl & RAPL_CORES) 682 outp += sprintf(outp, fmt8, 683 p->energy_cores * rapl_energy_units); 684 if (do_rapl & RAPL_GFX) 685 outp += sprintf(outp, fmt8, 686 p->energy_gfx * rapl_energy_units); 687 if (do_rapl & RAPL_DRAM) 688 outp += sprintf(outp, fmt8, 689 p->energy_dram * rapl_dram_energy_units); 690 if (do_rapl & RAPL_PKG_PERF_STATUS) 691 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 692 if (do_rapl & RAPL_DRAM_PERF_STATUS) 693 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 694 695 outp += sprintf(outp, fmt8, interval_float); 696 } 697 done: 698 outp += sprintf(outp, "\n"); 699 700 return 0; 701 } 702 703 void flush_output_stdout(void) 704 { 705 FILE *filep; 706 707 if (outf == stderr) 708 filep = stdout; 709 else 710 filep = outf; 711 712 fputs(output_buffer, filep); 713 fflush(filep); 714 715 outp = output_buffer; 716 } 717 void flush_output_stderr(void) 718 { 719 fputs(output_buffer, outf); 720 fflush(outf); 721 outp = output_buffer; 722 } 723 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 724 { 725 static int printed; 726 727 if (!printed || !summary_only) 728 print_header(); 729 730 if (topo.num_cpus > 1) 731 format_counters(&average.threads, &average.cores, 732 &average.packages); 733 734 printed = 1; 735 736 if (summary_only) 737 return; 738 739 for_all_cpus(format_counters, t, c, p); 740 } 741 742 #define DELTA_WRAP32(new, old) \ 743 if (new > old) { \ 744 old = new - old; \ 745 } else { \ 746 old = 0x100000000 + new - old; \ 747 } 748 749 void 750 delta_package(struct pkg_data *new, struct pkg_data *old) 751 { 752 753 if (do_skl_residency) { 754 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0; 755 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0; 756 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0; 757 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; 758 } 759 old->pc2 = new->pc2 - old->pc2; 760 if (do_pc3) 761 old->pc3 = new->pc3 - old->pc3; 762 if (do_pc6) 763 old->pc6 = new->pc6 - old->pc6; 764 if (do_pc7) 765 old->pc7 = new->pc7 - old->pc7; 766 old->pc8 = new->pc8 - old->pc8; 767 old->pc9 = new->pc9 - old->pc9; 768 old->pc10 = new->pc10 - old->pc10; 769 old->pkg_temp_c = new->pkg_temp_c; 770 771 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; 772 old->gfx_mhz = new->gfx_mhz; 773 774 
DELTA_WRAP32(new->energy_pkg, old->energy_pkg); 775 DELTA_WRAP32(new->energy_cores, old->energy_cores); 776 DELTA_WRAP32(new->energy_gfx, old->energy_gfx); 777 DELTA_WRAP32(new->energy_dram, old->energy_dram); 778 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status); 779 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status); 780 } 781 782 void 783 delta_core(struct core_data *new, struct core_data *old) 784 { 785 old->c3 = new->c3 - old->c3; 786 old->c6 = new->c6 - old->c6; 787 old->c7 = new->c7 - old->c7; 788 old->core_temp_c = new->core_temp_c; 789 } 790 791 /* 792 * old = new - old 793 */ 794 void 795 delta_thread(struct thread_data *new, struct thread_data *old, 796 struct core_data *core_delta) 797 { 798 old->tsc = new->tsc - old->tsc; 799 800 /* check for TSC < 1 Mcycles over interval */ 801 if (old->tsc < (1000 * 1000)) 802 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n" 803 "You can disable all c-states by booting with \"idle=poll\"\n" 804 "or just the deep ones with \"processor.max_cstate=1\""); 805 806 old->c1 = new->c1 - old->c1; 807 808 if (has_aperf) { 809 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 810 old->aperf = new->aperf - old->aperf; 811 old->mperf = new->mperf - old->mperf; 812 } else { 813 814 if (!aperf_mperf_unstable) { 815 fprintf(outf, "%s: APERF or MPERF went backwards *\n", progname); 816 fprintf(outf, "* Frequency results do not cover entire interval *\n"); 817 fprintf(outf, "* fix this by running Linux-2.6.30 or later *\n"); 818 819 aperf_mperf_unstable = 1; 820 } 821 /* 822 * mperf delta is likely a huge "positive" number 823 * can not use it for calculating c0 time 824 */ 825 skip_c0 = 1; 826 skip_c1 = 1; 827 } 828 } 829 830 831 if (use_c1_residency_msr) { 832 /* 833 * Some models have a dedicated C1 residency MSR, 834 * which should be more accurate than the derivation below. 835 */ 836 } else { 837 /* 838 * As counter collection is not atomic, 839 * it is possible for mperf's non-halted cycles + idle states 840 * to exceed TSC's all cycles: show c1 = 0% in that case. 
841 */ 842 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) 843 old->c1 = 0; 844 else { 845 /* normal case, derive c1 */ 846 old->c1 = old->tsc - old->mperf - core_delta->c3 847 - core_delta->c6 - core_delta->c7; 848 } 849 } 850 851 if (old->mperf == 0) { 852 if (debug > 1) 853 fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id); 854 old->mperf = 1; /* divide by 0 protection */ 855 } 856 857 old->extra_delta32 = new->extra_delta32 - old->extra_delta32; 858 old->extra_delta32 &= 0xFFFFFFFF; 859 860 old->extra_delta64 = new->extra_delta64 - old->extra_delta64; 861 862 /* 863 * Extra MSR is just a snapshot, simply copy latest w/o subtracting 864 */ 865 old->extra_msr32 = new->extra_msr32; 866 old->extra_msr64 = new->extra_msr64; 867 868 if (do_irq) 869 old->irq_count = new->irq_count - old->irq_count; 870 871 if (do_smi) 872 old->smi_count = new->smi_count - old->smi_count; 873 } 874 875 int delta_cpu(struct thread_data *t, struct core_data *c, 876 struct pkg_data *p, struct thread_data *t2, 877 struct core_data *c2, struct pkg_data *p2) 878 { 879 /* calculate core delta only for 1st thread in core */ 880 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) 881 delta_core(c, c2); 882 883 /* always calculate thread delta */ 884 delta_thread(t, t2, c2); /* c2 is core delta */ 885 886 /* calculate package delta only for 1st core in package */ 887 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) 888 delta_package(p, p2); 889 890 return 0; 891 } 892 893 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 894 { 895 t->tsc = 0; 896 t->aperf = 0; 897 t->mperf = 0; 898 t->c1 = 0; 899 900 t->extra_delta32 = 0; 901 t->extra_delta64 = 0; 902 903 t->irq_count = 0; 904 t->smi_count = 0; 905 906 /* tells format_counters to dump all fields from this set */ 907 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE; 908 909 c->c3 = 0; 910 c->c6 = 0; 911 c->c7 = 0; 912 c->core_temp_c = 0; 913 914 p->pkg_wtd_core_c0 = 0; 915 p->pkg_any_core_c0 = 0; 916 p->pkg_any_gfxe_c0 = 0; 917 p->pkg_both_core_gfxe_c0 = 0; 918 919 p->pc2 = 0; 920 if (do_pc3) 921 p->pc3 = 0; 922 if (do_pc6) 923 p->pc6 = 0; 924 if (do_pc7) 925 p->pc7 = 0; 926 p->pc8 = 0; 927 p->pc9 = 0; 928 p->pc10 = 0; 929 930 p->energy_pkg = 0; 931 p->energy_dram = 0; 932 p->energy_cores = 0; 933 p->energy_gfx = 0; 934 p->rapl_pkg_perf_status = 0; 935 p->rapl_dram_perf_status = 0; 936 p->pkg_temp_c = 0; 937 938 p->gfx_rc6_ms = 0; 939 p->gfx_mhz = 0; 940 } 941 int sum_counters(struct thread_data *t, struct core_data *c, 942 struct pkg_data *p) 943 { 944 average.threads.tsc += t->tsc; 945 average.threads.aperf += t->aperf; 946 average.threads.mperf += t->mperf; 947 average.threads.c1 += t->c1; 948 949 average.threads.extra_delta32 += t->extra_delta32; 950 average.threads.extra_delta64 += t->extra_delta64; 951 952 average.threads.irq_count += t->irq_count; 953 average.threads.smi_count += t->smi_count; 954 955 /* sum per-core values only for 1st thread in core */ 956 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 957 return 0; 958 959 average.cores.c3 += c->c3; 960 average.cores.c6 += c->c6; 961 average.cores.c7 += c->c7; 962 963 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); 964 965 /* sum per-pkg values only for 1st core in pkg */ 966 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 967 return 0; 968 969 if (do_skl_residency) { 970 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0; 971 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0; 972 
average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0; 973 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0; 974 } 975 976 average.packages.pc2 += p->pc2; 977 if (do_pc3) 978 average.packages.pc3 += p->pc3; 979 if (do_pc6) 980 average.packages.pc6 += p->pc6; 981 if (do_pc7) 982 average.packages.pc7 += p->pc7; 983 average.packages.pc8 += p->pc8; 984 average.packages.pc9 += p->pc9; 985 average.packages.pc10 += p->pc10; 986 987 average.packages.energy_pkg += p->energy_pkg; 988 average.packages.energy_dram += p->energy_dram; 989 average.packages.energy_cores += p->energy_cores; 990 average.packages.energy_gfx += p->energy_gfx; 991 992 average.packages.gfx_rc6_ms = p->gfx_rc6_ms; 993 average.packages.gfx_mhz = p->gfx_mhz; 994 995 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c); 996 997 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status; 998 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; 999 return 0; 1000 } 1001 /* 1002 * sum the counters for all cpus in the system 1003 * compute the weighted average 1004 */ 1005 void compute_average(struct thread_data *t, struct core_data *c, 1006 struct pkg_data *p) 1007 { 1008 clear_counters(&average.threads, &average.cores, &average.packages); 1009 1010 for_all_cpus(sum_counters, t, c, p); 1011 1012 average.threads.tsc /= topo.num_cpus; 1013 average.threads.aperf /= topo.num_cpus; 1014 average.threads.mperf /= topo.num_cpus; 1015 average.threads.c1 /= topo.num_cpus; 1016 1017 average.threads.extra_delta32 /= topo.num_cpus; 1018 average.threads.extra_delta32 &= 0xFFFFFFFF; 1019 1020 average.threads.extra_delta64 /= topo.num_cpus; 1021 1022 average.cores.c3 /= topo.num_cores; 1023 average.cores.c6 /= topo.num_cores; 1024 average.cores.c7 /= topo.num_cores; 1025 1026 if (do_skl_residency) { 1027 average.packages.pkg_wtd_core_c0 /= topo.num_packages; 1028 average.packages.pkg_any_core_c0 /= topo.num_packages; 1029 average.packages.pkg_any_gfxe_c0 /= topo.num_packages; 1030 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages; 1031 } 1032 1033 average.packages.pc2 /= topo.num_packages; 1034 if (do_pc3) 1035 average.packages.pc3 /= topo.num_packages; 1036 if (do_pc6) 1037 average.packages.pc6 /= topo.num_packages; 1038 if (do_pc7) 1039 average.packages.pc7 /= topo.num_packages; 1040 1041 average.packages.pc8 /= topo.num_packages; 1042 average.packages.pc9 /= topo.num_packages; 1043 average.packages.pc10 /= topo.num_packages; 1044 } 1045 1046 static unsigned long long rdtsc(void) 1047 { 1048 unsigned int low, high; 1049 1050 asm volatile("rdtsc" : "=a" (low), "=d" (high)); 1051 1052 return low | ((unsigned long long)high) << 32; 1053 } 1054 1055 /* 1056 * get_counters(...) 1057 * migrate to cpu 1058 * acquire and record local counters for that cpu 1059 */ 1060 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 1061 { 1062 int cpu = t->cpu_id; 1063 unsigned long long msr; 1064 int aperf_mperf_retry_count = 0; 1065 1066 if (cpu_migrate(cpu)) { 1067 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1068 return -1; 1069 } 1070 1071 retry: 1072 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1073 1074 if (has_aperf) { 1075 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1076 1077 /* 1078 * The TSC, APERF and MPERF must be read together for 1079 * APERF/MPERF and MPERF/TSC to give accurate results. 
1080 * 1081 * Unfortunately, APERF and MPERF are read by 1082 * individual system call, so delays may occur 1083 * between them. If the time to read them 1084 * varies by a large amount, we re-read them. 1085 */ 1086 1087 /* 1088 * This initial dummy APERF read has been seen to 1089 * reduce jitter in the subsequent reads. 1090 */ 1091 1092 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) 1093 return -3; 1094 1095 t->tsc = rdtsc(); /* re-read close to APERF */ 1096 1097 tsc_before = t->tsc; 1098 1099 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) 1100 return -3; 1101 1102 tsc_between = rdtsc(); 1103 1104 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) 1105 return -4; 1106 1107 tsc_after = rdtsc(); 1108 1109 aperf_time = tsc_between - tsc_before; 1110 mperf_time = tsc_after - tsc_between; 1111 1112 /* 1113 * If the system call latency to read APERF and MPERF 1114 * differ by more than 2x, then try again. 1115 */ 1116 if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) { 1117 aperf_mperf_retry_count++; 1118 if (aperf_mperf_retry_count < 5) 1119 goto retry; 1120 else 1121 warnx("cpu%d jitter %lld %lld", 1122 cpu, aperf_time, mperf_time); 1123 } 1124 aperf_mperf_retry_count = 0; 1125 1126 t->aperf = t->aperf * aperf_mperf_multiplier; 1127 t->mperf = t->mperf * aperf_mperf_multiplier; 1128 } 1129 1130 if (do_irq) 1131 t->irq_count = irqs_per_cpu[cpu]; 1132 if (do_smi) { 1133 if (get_msr(cpu, MSR_SMI_COUNT, &msr)) 1134 return -5; 1135 t->smi_count = msr & 0xFFFFFFFF; 1136 } 1137 if (extra_delta_offset32) { 1138 if (get_msr(cpu, extra_delta_offset32, &msr)) 1139 return -5; 1140 t->extra_delta32 = msr & 0xFFFFFFFF; 1141 } 1142 1143 if (extra_delta_offset64) 1144 if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64)) 1145 return -5; 1146 1147 if (extra_msr_offset32) { 1148 if (get_msr(cpu, extra_msr_offset32, &msr)) 1149 return -5; 1150 t->extra_msr32 = msr & 0xFFFFFFFF; 1151 } 1152 1153 if (extra_msr_offset64) 1154 if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64)) 1155 return -5; 1156 1157 if (use_c1_residency_msr) { 1158 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) 1159 return -6; 1160 } 1161 1162 /* collect core counters only for 1st thread in core */ 1163 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1164 return 0; 1165 1166 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { 1167 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1168 return -6; 1169 } 1170 1171 if (do_nhm_cstates && !do_knl_cstates) { 1172 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1173 return -7; 1174 } else if (do_knl_cstates) { 1175 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1176 return -7; 1177 } 1178 1179 if (do_snb_cstates) 1180 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1181 return -8; 1182 1183 if (do_dts) { 1184 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 1185 return -9; 1186 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1187 } 1188 1189 1190 /* collect package counters only for 1st core in package */ 1191 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 1192 return 0; 1193 1194 if (do_skl_residency) { 1195 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0)) 1196 return -10; 1197 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0)) 1198 return -11; 1199 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0)) 1200 return -12; 1201 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) 1202 return -13; 1203 } 1204 if (do_pc3) 1205 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) 1206 return -9; 1207 
if (do_pc6) 1208 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) 1209 return -10; 1210 if (do_pc2) 1211 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) 1212 return -11; 1213 if (do_pc7) 1214 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) 1215 return -12; 1216 if (do_c8_c9_c10) { 1217 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) 1218 return -13; 1219 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) 1220 return -13; 1221 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) 1222 return -13; 1223 } 1224 if (do_rapl & RAPL_PKG) { 1225 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) 1226 return -13; 1227 p->energy_pkg = msr & 0xFFFFFFFF; 1228 } 1229 if (do_rapl & RAPL_CORES) { 1230 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr)) 1231 return -14; 1232 p->energy_cores = msr & 0xFFFFFFFF; 1233 } 1234 if (do_rapl & RAPL_DRAM) { 1235 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) 1236 return -15; 1237 p->energy_dram = msr & 0xFFFFFFFF; 1238 } 1239 if (do_rapl & RAPL_GFX) { 1240 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr)) 1241 return -16; 1242 p->energy_gfx = msr & 0xFFFFFFFF; 1243 } 1244 if (do_rapl & RAPL_PKG_PERF_STATUS) { 1245 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr)) 1246 return -16; 1247 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF; 1248 } 1249 if (do_rapl & RAPL_DRAM_PERF_STATUS) { 1250 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr)) 1251 return -16; 1252 p->rapl_dram_perf_status = msr & 0xFFFFFFFF; 1253 } 1254 if (do_ptm) { 1255 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 1256 return -17; 1257 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1258 } 1259 1260 if (do_gfx_rc6_ms) 1261 p->gfx_rc6_ms = gfx_cur_rc6_ms; 1262 1263 if (do_gfx_mhz) 1264 p->gfx_mhz = gfx_cur_mhz; 1265 1266 return 0; 1267 } 1268 1269 /* 1270 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit: 1271 * If you change the values, note they are used both in comparisons 1272 * (>= PCL__7) and to index pkg_cstate_limit_strings[]. 
1273 */ 1274 1275 #define PCLUKN 0 /* Unknown */ 1276 #define PCLRSV 1 /* Reserved */ 1277 #define PCL__0 2 /* PC0 */ 1278 #define PCL__1 3 /* PC1 */ 1279 #define PCL__2 4 /* PC2 */ 1280 #define PCL__3 5 /* PC3 */ 1281 #define PCL__4 6 /* PC4 */ 1282 #define PCL__6 7 /* PC6 */ 1283 #define PCL_6N 8 /* PC6 No Retention */ 1284 #define PCL_6R 9 /* PC6 Retention */ 1285 #define PCL__7 10 /* PC7 */ 1286 #define PCL_7S 11 /* PC7 Shrink */ 1287 #define PCL__8 12 /* PC8 */ 1288 #define PCL__9 13 /* PC9 */ 1289 #define PCLUNL 14 /* Unlimited */ 1290 1291 int pkg_cstate_limit = PCLUKN; 1292 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", 1293 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"}; 1294 1295 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1296 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1297 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1298 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1299 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1300 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1301 1302 1303 static void 1304 calculate_tsc_tweak() 1305 { 1306 tsc_tweak = base_hz / tsc_hz; 1307 } 1308 1309 static void 1310 dump_nhm_platform_info(void) 1311 { 1312 unsigned long long msr; 1313 unsigned int ratio; 1314 1315 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 1316 1317 fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); 1318 1319 ratio = (msr >> 40) & 0xFF; 1320 fprintf(outf, "%d * %.0f = %.0f MHz max efficiency frequency\n", 1321 ratio, bclk, ratio * bclk); 1322 1323 ratio = (msr >> 8) & 0xFF; 1324 fprintf(outf, "%d * %.0f = %.0f MHz base frequency\n", 1325 ratio, bclk, ratio * bclk); 1326 1327 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); 1328 fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", 1329 base_cpu, msr, msr & 0x2 ? 
"EN" : "DIS"); 1330 1331 return; 1332 } 1333 1334 static void 1335 dump_hsw_turbo_ratio_limits(void) 1336 { 1337 unsigned long long msr; 1338 unsigned int ratio; 1339 1340 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); 1341 1342 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr); 1343 1344 ratio = (msr >> 8) & 0xFF; 1345 if (ratio) 1346 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 18 active cores\n", 1347 ratio, bclk, ratio * bclk); 1348 1349 ratio = (msr >> 0) & 0xFF; 1350 if (ratio) 1351 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 17 active cores\n", 1352 ratio, bclk, ratio * bclk); 1353 return; 1354 } 1355 1356 static void 1357 dump_ivt_turbo_ratio_limits(void) 1358 { 1359 unsigned long long msr; 1360 unsigned int ratio; 1361 1362 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); 1363 1364 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr); 1365 1366 ratio = (msr >> 56) & 0xFF; 1367 if (ratio) 1368 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 16 active cores\n", 1369 ratio, bclk, ratio * bclk); 1370 1371 ratio = (msr >> 48) & 0xFF; 1372 if (ratio) 1373 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 15 active cores\n", 1374 ratio, bclk, ratio * bclk); 1375 1376 ratio = (msr >> 40) & 0xFF; 1377 if (ratio) 1378 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 14 active cores\n", 1379 ratio, bclk, ratio * bclk); 1380 1381 ratio = (msr >> 32) & 0xFF; 1382 if (ratio) 1383 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 13 active cores\n", 1384 ratio, bclk, ratio * bclk); 1385 1386 ratio = (msr >> 24) & 0xFF; 1387 if (ratio) 1388 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 12 active cores\n", 1389 ratio, bclk, ratio * bclk); 1390 1391 ratio = (msr >> 16) & 0xFF; 1392 if (ratio) 1393 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 11 active cores\n", 1394 ratio, bclk, ratio * bclk); 1395 1396 ratio = (msr >> 8) & 0xFF; 1397 if (ratio) 1398 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 10 active cores\n", 1399 ratio, bclk, ratio * bclk); 1400 1401 ratio = (msr >> 0) & 0xFF; 1402 if (ratio) 1403 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 9 active cores\n", 1404 ratio, bclk, ratio * bclk); 1405 return; 1406 } 1407 1408 static void 1409 dump_nhm_turbo_ratio_limits(void) 1410 { 1411 unsigned long long msr; 1412 unsigned int ratio; 1413 1414 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); 1415 1416 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); 1417 1418 ratio = (msr >> 56) & 0xFF; 1419 if (ratio) 1420 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 8 active cores\n", 1421 ratio, bclk, ratio * bclk); 1422 1423 ratio = (msr >> 48) & 0xFF; 1424 if (ratio) 1425 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 7 active cores\n", 1426 ratio, bclk, ratio * bclk); 1427 1428 ratio = (msr >> 40) & 0xFF; 1429 if (ratio) 1430 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 6 active cores\n", 1431 ratio, bclk, ratio * bclk); 1432 1433 ratio = (msr >> 32) & 0xFF; 1434 if (ratio) 1435 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 5 active cores\n", 1436 ratio, bclk, ratio * bclk); 1437 1438 ratio = (msr >> 24) & 0xFF; 1439 if (ratio) 1440 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 4 active cores\n", 1441 ratio, bclk, ratio * bclk); 1442 1443 ratio = (msr >> 16) & 0xFF; 1444 if (ratio) 1445 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 3 active cores\n", 1446 ratio, bclk, ratio * bclk); 1447 1448 ratio = (msr >> 8) & 0xFF; 1449 if (ratio) 1450 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 2 active cores\n", 1451 ratio, bclk, ratio * bclk); 1452 
1453 ratio = (msr >> 0) & 0xFF; 1454 if (ratio) 1455 fprintf(outf, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", 1456 ratio, bclk, ratio * bclk); 1457 return; 1458 } 1459 1460 static void 1461 dump_knl_turbo_ratio_limits(void) 1462 { 1463 const unsigned int buckets_no = 7; 1464 1465 unsigned long long msr; 1466 int delta_cores, delta_ratio; 1467 int i, b_nr; 1468 unsigned int cores[buckets_no]; 1469 unsigned int ratio[buckets_no]; 1470 1471 get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr); 1472 1473 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", 1474 base_cpu, msr); 1475 1476 /** 1477 * Turbo encoding in KNL is as follows: 1478 * [0] -- Reserved 1479 * [7:1] -- Base value of number of active cores of bucket 1. 1480 * [15:8] -- Base value of freq ratio of bucket 1. 1481 * [20:16] -- +ve delta of number of active cores of bucket 2. 1482 * i.e. active cores of bucket 2 = 1483 * active cores of bucket 1 + delta 1484 * [23:21] -- Negative delta of freq ratio of bucket 2. 1485 * i.e. freq ratio of bucket 2 = 1486 * freq ratio of bucket 1 - delta 1487 * [28:24]-- +ve delta of number of active cores of bucket 3. 1488 * [31:29]-- -ve delta of freq ratio of bucket 3. 1489 * [36:32]-- +ve delta of number of active cores of bucket 4. 1490 * [39:37]-- -ve delta of freq ratio of bucket 4. 1491 * [44:40]-- +ve delta of number of active cores of bucket 5. 1492 * [47:45]-- -ve delta of freq ratio of bucket 5. 1493 * [52:48]-- +ve delta of number of active cores of bucket 6. 1494 * [55:53]-- -ve delta of freq ratio of bucket 6. 1495 * [60:56]-- +ve delta of number of active cores of bucket 7. 1496 * [63:61]-- -ve delta of freq ratio of bucket 7. 1497 */ 1498 1499 b_nr = 0; 1500 cores[b_nr] = (msr & 0xFF) >> 1; 1501 ratio[b_nr] = (msr >> 8) & 0xFF; 1502 1503 for (i = 16; i < 64; i += 8) { 1504 delta_cores = (msr >> i) & 0x1F; 1505 delta_ratio = (msr >> (i + 5)) & 0x7; 1506 1507 cores[b_nr + 1] = cores[b_nr] + delta_cores; 1508 ratio[b_nr + 1] = ratio[b_nr] - delta_ratio; 1509 b_nr++; 1510 } 1511 1512 for (i = buckets_no - 1; i >= 0; i--) 1513 if (i > 0 ? ratio[i] != ratio[i - 1] : 1) 1514 fprintf(outf, 1515 "%d * %.0f = %.0f MHz max turbo %d active cores\n", 1516 ratio[i], bclk, ratio[i] * bclk, cores[i]); 1517 } 1518 1519 static void 1520 dump_nhm_cst_cfg(void) 1521 { 1522 unsigned long long msr; 1523 1524 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1525 1526 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 1527 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 1528 1529 fprintf(outf, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr); 1530 1531 fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n", 1532 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", 1533 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", 1534 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", 1535 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "", 1536 (msr & (1 << 15)) ? 
"" : "UN", 1537 (unsigned int)msr & 0xF, 1538 pkg_cstate_limit_strings[pkg_cstate_limit]); 1539 return; 1540 } 1541 1542 static void 1543 dump_config_tdp(void) 1544 { 1545 unsigned long long msr; 1546 1547 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr); 1548 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr); 1549 fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF); 1550 1551 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr); 1552 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr); 1553 if (msr) { 1554 fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF); 1555 fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF); 1556 fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF); 1557 fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF); 1558 } 1559 fprintf(outf, ")\n"); 1560 1561 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr); 1562 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr); 1563 if (msr) { 1564 fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF); 1565 fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF); 1566 fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF); 1567 fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF); 1568 } 1569 fprintf(outf, ")\n"); 1570 1571 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr); 1572 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr); 1573 if ((msr) & 0x3) 1574 fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3); 1575 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 1576 fprintf(outf, ")\n"); 1577 1578 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr); 1579 fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr); 1580 fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF); 1581 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 1582 fprintf(outf, ")\n"); 1583 } 1584 1585 unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; 1586 1587 void print_irtl(void) 1588 { 1589 unsigned long long msr; 1590 1591 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); 1592 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); 1593 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1594 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1595 1596 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr); 1597 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr); 1598 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1599 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1600 1601 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr); 1602 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr); 1603 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1604 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1605 1606 if (!do_irtl_hsw) 1607 return; 1608 1609 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr); 1610 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr); 1611 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1612 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1613 1614 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr); 1615 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr); 1616 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? 
"" : "NOT", 1617 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1618 1619 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr); 1620 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr); 1621 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 1622 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 1623 1624 } 1625 void free_fd_percpu(void) 1626 { 1627 int i; 1628 1629 for (i = 0; i < topo.max_cpu_num; ++i) { 1630 if (fd_percpu[i] != 0) 1631 close(fd_percpu[i]); 1632 } 1633 1634 free(fd_percpu); 1635 } 1636 1637 void free_all_buffers(void) 1638 { 1639 CPU_FREE(cpu_present_set); 1640 cpu_present_set = NULL; 1641 cpu_present_setsize = 0; 1642 1643 CPU_FREE(cpu_affinity_set); 1644 cpu_affinity_set = NULL; 1645 cpu_affinity_setsize = 0; 1646 1647 free(thread_even); 1648 free(core_even); 1649 free(package_even); 1650 1651 thread_even = NULL; 1652 core_even = NULL; 1653 package_even = NULL; 1654 1655 free(thread_odd); 1656 free(core_odd); 1657 free(package_odd); 1658 1659 thread_odd = NULL; 1660 core_odd = NULL; 1661 package_odd = NULL; 1662 1663 free(output_buffer); 1664 output_buffer = NULL; 1665 outp = NULL; 1666 1667 free_fd_percpu(); 1668 1669 free(irq_column_2_cpu); 1670 free(irqs_per_cpu); 1671 } 1672 1673 /* 1674 * Open a file, and exit on failure 1675 */ 1676 FILE *fopen_or_die(const char *path, const char *mode) 1677 { 1678 FILE *filep = fopen(path, mode); 1679 if (!filep) 1680 err(1, "%s: open failed", path); 1681 return filep; 1682 } 1683 1684 /* 1685 * Parse a file containing a single int. 1686 */ 1687 int parse_int_file(const char *fmt, ...) 1688 { 1689 va_list args; 1690 char path[PATH_MAX]; 1691 FILE *filep; 1692 int value; 1693 1694 va_start(args, fmt); 1695 vsnprintf(path, sizeof(path), fmt, args); 1696 va_end(args); 1697 filep = fopen_or_die(path, "r"); 1698 if (fscanf(filep, "%d", &value) != 1) 1699 err(1, "%s: failed to parse number from file", path); 1700 fclose(filep); 1701 return value; 1702 } 1703 1704 /* 1705 * get_cpu_position_in_core(cpu) 1706 * return the position of the CPU among its HT siblings in the core 1707 * return -1 if the sibling is not in list 1708 */ 1709 int get_cpu_position_in_core(int cpu) 1710 { 1711 char path[64]; 1712 FILE *filep; 1713 int this_cpu; 1714 char character; 1715 int i; 1716 1717 sprintf(path, 1718 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", 1719 cpu); 1720 filep = fopen(path, "r"); 1721 if (filep == NULL) { 1722 perror(path); 1723 exit(1); 1724 } 1725 1726 for (i = 0; i < topo.num_threads_per_core; i++) { 1727 fscanf(filep, "%d", &this_cpu); 1728 if (this_cpu == cpu) { 1729 fclose(filep); 1730 return i; 1731 } 1732 1733 /* Account for no separator after last thread*/ 1734 if (i != (topo.num_threads_per_core - 1)) 1735 fscanf(filep, "%c", &character); 1736 } 1737 1738 fclose(filep); 1739 return -1; 1740 } 1741 1742 /* 1743 * cpu_is_first_core_in_package(cpu) 1744 * return 1 if given CPU is 1st core in package 1745 */ 1746 int cpu_is_first_core_in_package(int cpu) 1747 { 1748 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); 1749 } 1750 1751 int get_physical_package_id(int cpu) 1752 { 1753 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); 1754 } 1755 1756 int get_core_id(int cpu) 1757 { 1758 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); 1759 } 1760 1761 int get_num_ht_siblings(int cpu) 1762 { 1763 char path[80]; 1764 FILE *filep; 1765 int sib1; 1766 int matches = 0; 1767 char 
character; 1768 char str[100]; 1769 char *ch; 1770 1771 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); 1772 filep = fopen_or_die(path, "r"); 1773 1774 /* 1775 * file format: 1776 * A ',' separated or '-' separated set of numbers 1777 * (eg 1-2 or 1,3,4,5) 1778 */ 1779 fscanf(filep, "%d%c\n", &sib1, &character); 1780 fseek(filep, 0, SEEK_SET); 1781 fgets(str, 100, filep); 1782 ch = strchr(str, character); 1783 while (ch != NULL) { 1784 matches++; 1785 ch = strchr(ch+1, character); 1786 } 1787 1788 fclose(filep); 1789 return matches+1; 1790 } 1791 1792 /* 1793 * run func(thread, core, package) in topology order 1794 * skip non-present cpus 1795 */ 1796 1797 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, 1798 struct pkg_data *, struct thread_data *, struct core_data *, 1799 struct pkg_data *), struct thread_data *thread_base, 1800 struct core_data *core_base, struct pkg_data *pkg_base, 1801 struct thread_data *thread_base2, struct core_data *core_base2, 1802 struct pkg_data *pkg_base2) 1803 { 1804 int retval, pkg_no, core_no, thread_no; 1805 1806 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 1807 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { 1808 for (thread_no = 0; thread_no < 1809 topo.num_threads_per_core; ++thread_no) { 1810 struct thread_data *t, *t2; 1811 struct core_data *c, *c2; 1812 struct pkg_data *p, *p2; 1813 1814 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); 1815 1816 if (cpu_is_not_present(t->cpu_id)) 1817 continue; 1818 1819 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no); 1820 1821 c = GET_CORE(core_base, core_no, pkg_no); 1822 c2 = GET_CORE(core_base2, core_no, pkg_no); 1823 1824 p = GET_PKG(pkg_base, pkg_no); 1825 p2 = GET_PKG(pkg_base2, pkg_no); 1826 1827 retval = func(t, c, p, t2, c2, p2); 1828 if (retval) 1829 return retval; 1830 } 1831 } 1832 } 1833 return 0; 1834 } 1835 1836 /* 1837 * run func(cpu) on every cpu in /proc/stat 1838 * return max_cpu number 1839 */ 1840 int for_all_proc_cpus(int (func)(int)) 1841 { 1842 FILE *fp; 1843 int cpu_num; 1844 int retval; 1845 1846 fp = fopen_or_die(proc_stat, "r"); 1847 1848 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); 1849 if (retval != 0) 1850 err(1, "%s: failed to parse format", proc_stat); 1851 1852 while (1) { 1853 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); 1854 if (retval != 1) 1855 break; 1856 1857 retval = func(cpu_num); 1858 if (retval) { 1859 fclose(fp); 1860 return(retval); 1861 } 1862 } 1863 fclose(fp); 1864 return 0; 1865 } 1866 1867 void re_initialize(void) 1868 { 1869 free_all_buffers(); 1870 setup_all_buffers(); 1871 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus); 1872 } 1873 1874 1875 /* 1876 * count_cpus() 1877 * remember the last one seen, it will be the max 1878 */ 1879 int count_cpus(int cpu) 1880 { 1881 if (topo.max_cpu_num < cpu) 1882 topo.max_cpu_num = cpu; 1883 1884 topo.num_cpus += 1; 1885 return 0; 1886 } 1887 int mark_cpu_present(int cpu) 1888 { 1889 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); 1890 return 0; 1891 } 1892 1893 /* 1894 * snapshot_proc_interrupts() 1895 * 1896 * read and record summary of /proc/interrupts 1897 * 1898 * return 1 if config change requires a restart, else return 0 1899 */ 1900 int snapshot_proc_interrupts(void) 1901 { 1902 static FILE *fp; 1903 int column, retval; 1904 1905 if (fp == NULL) 1906 fp = fopen_or_die("/proc/interrupts", "r"); 1907 else 1908 rewind(fp); 1909 
1910 /* read 1st line of /proc/interrupts to get cpu* name for each column */ 1911 for (column = 0; column < topo.num_cpus; ++column) { 1912 int cpu_number; 1913 1914 retval = fscanf(fp, " CPU%d", &cpu_number); 1915 if (retval != 1) 1916 break; 1917 1918 if (cpu_number > topo.max_cpu_num) { 1919 warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num); 1920 return 1; 1921 } 1922 1923 irq_column_2_cpu[column] = cpu_number; 1924 irqs_per_cpu[cpu_number] = 0; 1925 } 1926 1927 /* read /proc/interrupt count lines and sum up irqs per cpu */ 1928 while (1) { 1929 int column; 1930 char buf[64]; 1931 1932 retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */ 1933 if (retval != 1) 1934 break; 1935 1936 /* read the count per cpu */ 1937 for (column = 0; column < topo.num_cpus; ++column) { 1938 1939 int cpu_number, irq_count; 1940 1941 retval = fscanf(fp, " %d", &irq_count); 1942 if (retval != 1) 1943 break; 1944 1945 cpu_number = irq_column_2_cpu[column]; 1946 irqs_per_cpu[cpu_number] += irq_count; 1947 1948 } 1949 1950 while (getc(fp) != '\n') 1951 ; /* flush interrupt description */ 1952 1953 } 1954 return 0; 1955 } 1956 /* 1957 * snapshot_gfx_rc6_ms() 1958 * 1959 * record snapshot of 1960 * /sys/class/drm/card0/power/rc6_residency_ms 1961 * 1962 * return 1 if config change requires a restart, else return 0 1963 */ 1964 int snapshot_gfx_rc6_ms(void) 1965 { 1966 FILE *fp; 1967 int retval; 1968 1969 fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r"); 1970 1971 retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms); 1972 if (retval != 1) 1973 err(1, "GFX rc6"); 1974 1975 fclose(fp); 1976 1977 return 0; 1978 } 1979 /* 1980 * snapshot_gfx_mhz() 1981 * 1982 * record snapshot of 1983 * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz 1984 * 1985 * return 1 if config change requires a restart, else return 0 1986 */ 1987 int snapshot_gfx_mhz(void) 1988 { 1989 static FILE *fp; 1990 int retval; 1991 1992 if (fp == NULL) 1993 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); 1994 else 1995 rewind(fp); 1996 1997 retval = fscanf(fp, "%d", &gfx_cur_mhz); 1998 if (retval != 1) 1999 err(1, "GFX MHz"); 2000 2001 return 0; 2002 } 2003 2004 /* 2005 * snapshot /proc and /sys files 2006 * 2007 * return 1 if configuration restart needed, else return 0 2008 */ 2009 int snapshot_proc_sysfs_files(void) 2010 { 2011 if (snapshot_proc_interrupts()) 2012 return 1; 2013 2014 if (do_gfx_rc6_ms) 2015 snapshot_gfx_rc6_ms(); 2016 2017 if (do_gfx_mhz) 2018 snapshot_gfx_mhz(); 2019 2020 return 0; 2021 } 2022 2023 void turbostat_loop() 2024 { 2025 int retval; 2026 int restarted = 0; 2027 2028 restart: 2029 restarted++; 2030 2031 snapshot_proc_sysfs_files(); 2032 retval = for_all_cpus(get_counters, EVEN_COUNTERS); 2033 if (retval < -1) { 2034 exit(retval); 2035 } else if (retval == -1) { 2036 if (restarted > 1) { 2037 exit(retval); 2038 } 2039 re_initialize(); 2040 goto restart; 2041 } 2042 restarted = 0; 2043 gettimeofday(&tv_even, (struct timezone *)NULL); 2044 2045 while (1) { 2046 if (for_all_proc_cpus(cpu_is_not_present)) { 2047 re_initialize(); 2048 goto restart; 2049 } 2050 nanosleep(&interval_ts, NULL); 2051 if (snapshot_proc_sysfs_files()) 2052 goto restart; 2053 retval = for_all_cpus(get_counters, ODD_COUNTERS); 2054 if (retval < -1) { 2055 exit(retval); 2056 } else if (retval == -1) { 2057 re_initialize(); 2058 goto restart; 2059 } 2060 gettimeofday(&tv_odd, (struct timezone *)NULL); 2061 timersub(&tv_odd, &tv_even, &tv_delta); 2062 for_all_cpus_2(delta_cpu, 
ODD_COUNTERS, EVEN_COUNTERS); 2063 compute_average(EVEN_COUNTERS); 2064 format_all_counters(EVEN_COUNTERS); 2065 flush_output_stdout(); 2066 nanosleep(&interval_ts, NULL); 2067 if (snapshot_proc_sysfs_files()) 2068 goto restart; 2069 retval = for_all_cpus(get_counters, EVEN_COUNTERS); 2070 if (retval < -1) { 2071 exit(retval); 2072 } else if (retval == -1) { 2073 re_initialize(); 2074 goto restart; 2075 } 2076 gettimeofday(&tv_even, (struct timezone *)NULL); 2077 timersub(&tv_even, &tv_odd, &tv_delta); 2078 for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS); 2079 compute_average(ODD_COUNTERS); 2080 format_all_counters(ODD_COUNTERS); 2081 flush_output_stdout(); 2082 } 2083 } 2084 2085 void check_dev_msr() 2086 { 2087 struct stat sb; 2088 char pathname[32]; 2089 2090 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 2091 if (stat(pathname, &sb)) 2092 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 2093 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 2094 } 2095 2096 void check_permissions() 2097 { 2098 struct __user_cap_header_struct cap_header_data; 2099 cap_user_header_t cap_header = &cap_header_data; 2100 struct __user_cap_data_struct cap_data_data; 2101 cap_user_data_t cap_data = &cap_data_data; 2102 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); 2103 int do_exit = 0; 2104 char pathname[32]; 2105 2106 /* check for CAP_SYS_RAWIO */ 2107 cap_header->pid = getpid(); 2108 cap_header->version = _LINUX_CAPABILITY_VERSION; 2109 if (capget(cap_header, cap_data) < 0) 2110 err(-6, "capget(2) failed"); 2111 2112 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) { 2113 do_exit++; 2114 warnx("capget(CAP_SYS_RAWIO) failed," 2115 " try \"# setcap cap_sys_rawio=ep %s\"", progname); 2116 } 2117 2118 /* test file permissions */ 2119 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 2120 if (euidaccess(pathname, R_OK)) { 2121 do_exit++; 2122 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); 2123 } 2124 2125 /* if all else fails, thell them to be root */ 2126 if (do_exit) 2127 if (getuid() != 0) 2128 warnx("... 
or simply run as root"); 2129 2130 if (do_exit) 2131 exit(-6); 2132 } 2133 2134 /* 2135 * NHM adds support for additional MSRs: 2136 * 2137 * MSR_SMI_COUNT 0x00000034 2138 * 2139 * MSR_PLATFORM_INFO 0x000000ce 2140 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 2141 * 2142 * MSR_PKG_C3_RESIDENCY 0x000003f8 2143 * MSR_PKG_C6_RESIDENCY 0x000003f9 2144 * MSR_CORE_C3_RESIDENCY 0x000003fc 2145 * MSR_CORE_C6_RESIDENCY 0x000003fd 2146 * 2147 * Side effect: 2148 * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL 2149 */ 2150 int probe_nhm_msrs(unsigned int family, unsigned int model) 2151 { 2152 unsigned long long msr; 2153 unsigned int base_ratio; 2154 int *pkg_cstate_limits; 2155 2156 if (!genuine_intel) 2157 return 0; 2158 2159 if (family != 6) 2160 return 0; 2161 2162 bclk = discover_bclk(family, model); 2163 2164 switch (model) { 2165 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ 2166 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ 2167 case 0x1F: /* Core i7 and i5 Processor - Nehalem */ 2168 case 0x25: /* Westmere Client - Clarkdale, Arrandale */ 2169 case 0x2C: /* Westmere EP - Gulftown */ 2170 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 2171 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 2172 pkg_cstate_limits = nhm_pkg_cstate_limits; 2173 break; 2174 case 0x2A: /* SNB */ 2175 case 0x2D: /* SNB Xeon */ 2176 case 0x3A: /* IVB */ 2177 case 0x3E: /* IVB Xeon */ 2178 pkg_cstate_limits = snb_pkg_cstate_limits; 2179 break; 2180 case 0x3C: /* HSW */ 2181 case 0x3F: /* HSX */ 2182 case 0x45: /* HSW */ 2183 case 0x46: /* HSW */ 2184 case 0x3D: /* BDW */ 2185 case 0x47: /* BDW */ 2186 case 0x4F: /* BDX */ 2187 case 0x56: /* BDX-DE */ 2188 case 0x4E: /* SKL */ 2189 case 0x5E: /* SKL */ 2190 pkg_cstate_limits = hsw_pkg_cstate_limits; 2191 break; 2192 case 0x37: /* BYT */ 2193 case 0x4D: /* AVN */ 2194 pkg_cstate_limits = slv_pkg_cstate_limits; 2195 break; 2196 case 0x4C: /* AMT */ 2197 pkg_cstate_limits = amt_pkg_cstate_limits; 2198 break; 2199 case 0x57: /* PHI */ 2200 pkg_cstate_limits = phi_pkg_cstate_limits; 2201 break; 2202 default: 2203 return 0; 2204 } 2205 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 2206 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 2207 2208 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 2209 base_ratio = (msr >> 8) & 0xFF; 2210 2211 base_hz = base_ratio * bclk * 1000000; 2212 has_base_hz = 1; 2213 return 1; 2214 } 2215 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) 2216 { 2217 switch (model) { 2218 /* Nehalem compatible, but do not include turbo-ratio limit support */ 2219 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 2220 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 2221 case 0x57: /* PHI - Knights Landing (different MSR definition) */ 2222 return 0; 2223 default: 2224 return 1; 2225 } 2226 } 2227 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) 2228 { 2229 if (!genuine_intel) 2230 return 0; 2231 2232 if (family != 6) 2233 return 0; 2234 2235 switch (model) { 2236 case 0x3E: /* IVB Xeon */ 2237 case 0x3F: /* HSW Xeon */ 2238 return 1; 2239 default: 2240 return 0; 2241 } 2242 } 2243 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) 2244 { 2245 if (!genuine_intel) 2246 return 0; 2247 2248 if (family != 6) 2249 return 0; 2250 2251 switch (model) { 2252 case 0x3F: /* HSW Xeon */ 2253 return 1; 2254 default: 2255 return 0; 2256 } 2257 } 2258 2259 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) 2260 { 2261 if 
(!genuine_intel) 2262 return 0; 2263 2264 if (family != 6) 2265 return 0; 2266 2267 switch (model) { 2268 case 0x57: /* Knights Landing */ 2269 return 1; 2270 default: 2271 return 0; 2272 } 2273 } 2274 int has_config_tdp(unsigned int family, unsigned int model) 2275 { 2276 if (!genuine_intel) 2277 return 0; 2278 2279 if (family != 6) 2280 return 0; 2281 2282 switch (model) { 2283 case 0x3A: /* IVB */ 2284 case 0x3C: /* HSW */ 2285 case 0x3F: /* HSX */ 2286 case 0x45: /* HSW */ 2287 case 0x46: /* HSW */ 2288 case 0x3D: /* BDW */ 2289 case 0x47: /* BDW */ 2290 case 0x4F: /* BDX */ 2291 case 0x56: /* BDX-DE */ 2292 case 0x4E: /* SKL */ 2293 case 0x5E: /* SKL */ 2294 2295 case 0x57: /* Knights Landing */ 2296 return 1; 2297 default: 2298 return 0; 2299 } 2300 } 2301 2302 static void 2303 dump_cstate_pstate_config_info(int family, int model) 2304 { 2305 if (!do_nhm_platform_info) 2306 return; 2307 2308 dump_nhm_platform_info(); 2309 2310 if (has_hsw_turbo_ratio_limit(family, model)) 2311 dump_hsw_turbo_ratio_limits(); 2312 2313 if (has_ivt_turbo_ratio_limit(family, model)) 2314 dump_ivt_turbo_ratio_limits(); 2315 2316 if (has_nhm_turbo_ratio_limit(family, model)) 2317 dump_nhm_turbo_ratio_limits(); 2318 2319 if (has_knl_turbo_ratio_limit(family, model)) 2320 dump_knl_turbo_ratio_limits(); 2321 2322 if (has_config_tdp(family, model)) 2323 dump_config_tdp(); 2324 2325 dump_nhm_cst_cfg(); 2326 } 2327 2328 2329 /* 2330 * print_epb() 2331 * Decode the ENERGY_PERF_BIAS MSR 2332 */ 2333 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2334 { 2335 unsigned long long msr; 2336 char *epb_string; 2337 int cpu; 2338 2339 if (!has_epb) 2340 return 0; 2341 2342 cpu = t->cpu_id; 2343 2344 /* EPB is per-package */ 2345 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2346 return 0; 2347 2348 if (cpu_migrate(cpu)) { 2349 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2350 return -1; 2351 } 2352 2353 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) 2354 return 0; 2355 2356 switch (msr & 0xF) { 2357 case ENERGY_PERF_BIAS_PERFORMANCE: 2358 epb_string = "performance"; 2359 break; 2360 case ENERGY_PERF_BIAS_NORMAL: 2361 epb_string = "balanced"; 2362 break; 2363 case ENERGY_PERF_BIAS_POWERSAVE: 2364 epb_string = "powersave"; 2365 break; 2366 default: 2367 epb_string = "custom"; 2368 break; 2369 } 2370 fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string); 2371 2372 return 0; 2373 } 2374 /* 2375 * print_hwp() 2376 * Decode the MSR_HWP_CAPABILITIES 2377 */ 2378 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2379 { 2380 unsigned long long msr; 2381 int cpu; 2382 2383 if (!has_hwp) 2384 return 0; 2385 2386 cpu = t->cpu_id; 2387 2388 /* MSR_HWP_CAPABILITIES is per-package */ 2389 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2390 return 0; 2391 2392 if (cpu_migrate(cpu)) { 2393 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2394 return -1; 2395 } 2396 2397 if (get_msr(cpu, MSR_PM_ENABLE, &msr)) 2398 return 0; 2399 2400 fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", 2401 cpu, msr, (msr & (1 << 0)) ? 
"" : "No-"); 2402 2403 /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */ 2404 if ((msr & (1 << 0)) == 0) 2405 return 0; 2406 2407 if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr)) 2408 return 0; 2409 2410 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " 2411 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", 2412 cpu, msr, 2413 (unsigned int)HWP_HIGHEST_PERF(msr), 2414 (unsigned int)HWP_GUARANTEED_PERF(msr), 2415 (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), 2416 (unsigned int)HWP_LOWEST_PERF(msr)); 2417 2418 if (get_msr(cpu, MSR_HWP_REQUEST, &msr)) 2419 return 0; 2420 2421 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " 2422 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", 2423 cpu, msr, 2424 (unsigned int)(((msr) >> 0) & 0xff), 2425 (unsigned int)(((msr) >> 8) & 0xff), 2426 (unsigned int)(((msr) >> 16) & 0xff), 2427 (unsigned int)(((msr) >> 24) & 0xff), 2428 (unsigned int)(((msr) >> 32) & 0xff3), 2429 (unsigned int)(((msr) >> 42) & 0x1)); 2430 2431 if (has_hwp_pkg) { 2432 if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr)) 2433 return 0; 2434 2435 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " 2436 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", 2437 cpu, msr, 2438 (unsigned int)(((msr) >> 0) & 0xff), 2439 (unsigned int)(((msr) >> 8) & 0xff), 2440 (unsigned int)(((msr) >> 16) & 0xff), 2441 (unsigned int)(((msr) >> 24) & 0xff), 2442 (unsigned int)(((msr) >> 32) & 0xff3)); 2443 } 2444 if (has_hwp_notify) { 2445 if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr)) 2446 return 0; 2447 2448 fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx " 2449 "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", 2450 cpu, msr, 2451 ((msr) & 0x1) ? "EN" : "Dis", 2452 ((msr) & 0x2) ? "EN" : "Dis"); 2453 } 2454 if (get_msr(cpu, MSR_HWP_STATUS, &msr)) 2455 return 0; 2456 2457 fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx " 2458 "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", 2459 cpu, msr, 2460 ((msr) & 0x1) ? "" : "No-", 2461 ((msr) & 0x2) ? "" : "No-"); 2462 2463 return 0; 2464 } 2465 2466 /* 2467 * print_perf_limit() 2468 */ 2469 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2470 { 2471 unsigned long long msr; 2472 int cpu; 2473 2474 cpu = t->cpu_id; 2475 2476 /* per-package */ 2477 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2478 return 0; 2479 2480 if (cpu_migrate(cpu)) { 2481 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2482 return -1; 2483 } 2484 2485 if (do_core_perf_limit_reasons) { 2486 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr); 2487 fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 2488 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)", 2489 (msr & 1 << 15) ? "bit15, " : "", 2490 (msr & 1 << 14) ? "bit14, " : "", 2491 (msr & 1 << 13) ? "Transitions, " : "", 2492 (msr & 1 << 12) ? "MultiCoreTurbo, " : "", 2493 (msr & 1 << 11) ? "PkgPwrL2, " : "", 2494 (msr & 1 << 10) ? "PkgPwrL1, " : "", 2495 (msr & 1 << 9) ? "CorePwr, " : "", 2496 (msr & 1 << 8) ? "Amps, " : "", 2497 (msr & 1 << 6) ? "VR-Therm, " : "", 2498 (msr & 1 << 5) ? "Auto-HWP, " : "", 2499 (msr & 1 << 4) ? "Graphics, " : "", 2500 (msr & 1 << 2) ? "bit2, " : "", 2501 (msr & 1 << 1) ? "ThermStatus, " : "", 2502 (msr & 1 << 0) ? "PROCHOT, " : ""); 2503 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", 2504 (msr & 1 << 31) ? "bit31, " : "", 2505 (msr & 1 << 30) ? "bit30, " : "", 2506 (msr & 1 << 29) ? "Transitions, " : "", 2507 (msr & 1 << 28) ? 
"MultiCoreTurbo, " : "", 2508 (msr & 1 << 27) ? "PkgPwrL2, " : "", 2509 (msr & 1 << 26) ? "PkgPwrL1, " : "", 2510 (msr & 1 << 25) ? "CorePwr, " : "", 2511 (msr & 1 << 24) ? "Amps, " : "", 2512 (msr & 1 << 22) ? "VR-Therm, " : "", 2513 (msr & 1 << 21) ? "Auto-HWP, " : "", 2514 (msr & 1 << 20) ? "Graphics, " : "", 2515 (msr & 1 << 18) ? "bit18, " : "", 2516 (msr & 1 << 17) ? "ThermStatus, " : "", 2517 (msr & 1 << 16) ? "PROCHOT, " : ""); 2518 2519 } 2520 if (do_gfx_perf_limit_reasons) { 2521 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr); 2522 fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 2523 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)", 2524 (msr & 1 << 0) ? "PROCHOT, " : "", 2525 (msr & 1 << 1) ? "ThermStatus, " : "", 2526 (msr & 1 << 4) ? "Graphics, " : "", 2527 (msr & 1 << 6) ? "VR-Therm, " : "", 2528 (msr & 1 << 8) ? "Amps, " : "", 2529 (msr & 1 << 9) ? "GFXPwr, " : "", 2530 (msr & 1 << 10) ? "PkgPwrL1, " : "", 2531 (msr & 1 << 11) ? "PkgPwrL2, " : ""); 2532 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n", 2533 (msr & 1 << 16) ? "PROCHOT, " : "", 2534 (msr & 1 << 17) ? "ThermStatus, " : "", 2535 (msr & 1 << 20) ? "Graphics, " : "", 2536 (msr & 1 << 22) ? "VR-Therm, " : "", 2537 (msr & 1 << 24) ? "Amps, " : "", 2538 (msr & 1 << 25) ? "GFXPwr, " : "", 2539 (msr & 1 << 26) ? "PkgPwrL1, " : "", 2540 (msr & 1 << 27) ? "PkgPwrL2, " : ""); 2541 } 2542 if (do_ring_perf_limit_reasons) { 2543 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); 2544 fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 2545 fprintf(outf, " (Active: %s%s%s%s%s%s)", 2546 (msr & 1 << 0) ? "PROCHOT, " : "", 2547 (msr & 1 << 1) ? "ThermStatus, " : "", 2548 (msr & 1 << 6) ? "VR-Therm, " : "", 2549 (msr & 1 << 8) ? "Amps, " : "", 2550 (msr & 1 << 10) ? "PkgPwrL1, " : "", 2551 (msr & 1 << 11) ? "PkgPwrL2, " : ""); 2552 fprintf(outf, " (Logged: %s%s%s%s%s%s)\n", 2553 (msr & 1 << 16) ? "PROCHOT, " : "", 2554 (msr & 1 << 17) ? "ThermStatus, " : "", 2555 (msr & 1 << 22) ? "VR-Therm, " : "", 2556 (msr & 1 << 24) ? "Amps, " : "", 2557 (msr & 1 << 26) ? "PkgPwrL1, " : "", 2558 (msr & 1 << 27) ? "PkgPwrL2, " : ""); 2559 } 2560 return 0; 2561 } 2562 2563 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 2564 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 2565 2566 double get_tdp(int model) 2567 { 2568 unsigned long long msr; 2569 2570 if (do_rapl & RAPL_PKG_POWER_INFO) 2571 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) 2572 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; 2573 2574 switch (model) { 2575 case 0x37: 2576 case 0x4D: 2577 return 30.0; 2578 default: 2579 return 135.0; 2580 } 2581 } 2582 2583 /* 2584 * rapl_dram_energy_units_probe() 2585 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 
2586 */ 2587 static double 2588 rapl_dram_energy_units_probe(int model, double rapl_energy_units) 2589 { 2590 /* only called for genuine_intel, family 6 */ 2591 2592 switch (model) { 2593 case 0x3F: /* HSX */ 2594 case 0x4F: /* BDX */ 2595 case 0x56: /* BDX-DE */ 2596 case 0x57: /* KNL */ 2597 return (rapl_dram_energy_units = 15.3 / 1000000); 2598 default: 2599 return (rapl_energy_units); 2600 } 2601 } 2602 2603 2604 /* 2605 * rapl_probe() 2606 * 2607 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units 2608 */ 2609 void rapl_probe(unsigned int family, unsigned int model) 2610 { 2611 unsigned long long msr; 2612 unsigned int time_unit; 2613 double tdp; 2614 2615 if (!genuine_intel) 2616 return; 2617 2618 if (family != 6) 2619 return; 2620 2621 switch (model) { 2622 case 0x2A: 2623 case 0x3A: 2624 case 0x3C: /* HSW */ 2625 case 0x45: /* HSW */ 2626 case 0x46: /* HSW */ 2627 case 0x3D: /* BDW */ 2628 case 0x47: /* BDW */ 2629 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; 2630 break; 2631 case 0x4E: /* SKL */ 2632 case 0x5E: /* SKL */ 2633 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2634 break; 2635 case 0x3F: /* HSX */ 2636 case 0x4F: /* BDX */ 2637 case 0x56: /* BDX-DE */ 2638 case 0x57: /* KNL */ 2639 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2640 break; 2641 case 0x2D: 2642 case 0x3E: 2643 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; 2644 break; 2645 case 0x37: /* BYT */ 2646 case 0x4D: /* AVN */ 2647 do_rapl = RAPL_PKG | RAPL_CORES ; 2648 break; 2649 default: 2650 return; 2651 } 2652 2653 /* units on package 0, verify later other packages match */ 2654 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) 2655 return; 2656 2657 rapl_power_units = 1.0 / (1 << (msr & 0xF)); 2658 if (model == 0x37) 2659 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; 2660 else 2661 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); 2662 2663 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units); 2664 2665 time_unit = msr >> 16 & 0xF; 2666 if (time_unit == 0) 2667 time_unit = 0xA; 2668 2669 rapl_time_units = 1.0 / (1 << (time_unit)); 2670 2671 tdp = get_tdp(model); 2672 2673 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 2674 if (debug) 2675 fprintf(outf, "RAPL: %.0f sec. 
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); 2676 2677 return; 2678 } 2679 2680 void perf_limit_reasons_probe(int family, int model) 2681 { 2682 if (!genuine_intel) 2683 return; 2684 2685 if (family != 6) 2686 return; 2687 2688 switch (model) { 2689 case 0x3C: /* HSW */ 2690 case 0x45: /* HSW */ 2691 case 0x46: /* HSW */ 2692 do_gfx_perf_limit_reasons = 1; 2693 case 0x3F: /* HSX */ 2694 do_core_perf_limit_reasons = 1; 2695 do_ring_perf_limit_reasons = 1; 2696 default: 2697 return; 2698 } 2699 } 2700 2701 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2702 { 2703 unsigned long long msr; 2704 unsigned int dts; 2705 int cpu; 2706 2707 if (!(do_dts || do_ptm)) 2708 return 0; 2709 2710 cpu = t->cpu_id; 2711 2712 /* DTS is per-core, no need to print for each thread */ 2713 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 2714 return 0; 2715 2716 if (cpu_migrate(cpu)) { 2717 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2718 return -1; 2719 } 2720 2721 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { 2722 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 2723 return 0; 2724 2725 dts = (msr >> 16) & 0x7F; 2726 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", 2727 cpu, msr, tcc_activation_temp - dts); 2728 2729 #ifdef THERM_DEBUG 2730 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) 2731 return 0; 2732 2733 dts = (msr >> 16) & 0x7F; 2734 dts2 = (msr >> 8) & 0x7F; 2735 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 2736 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 2737 #endif 2738 } 2739 2740 2741 if (do_dts) { 2742 unsigned int resolution; 2743 2744 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 2745 return 0; 2746 2747 dts = (msr >> 16) & 0x7F; 2748 resolution = (msr >> 27) & 0xF; 2749 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", 2750 cpu, msr, tcc_activation_temp - dts, resolution); 2751 2752 #ifdef THERM_DEBUG 2753 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) 2754 return 0; 2755 2756 dts = (msr >> 16) & 0x7F; 2757 dts2 = (msr >> 8) & 0x7F; 2758 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 2759 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 2760 #endif 2761 } 2762 2763 return 0; 2764 } 2765 2766 void print_power_limit_msr(int cpu, unsigned long long msr, char *label) 2767 { 2768 fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n", 2769 cpu, label, 2770 ((msr >> 15) & 1) ? "EN" : "DIS", 2771 ((msr >> 0) & 0x7FFF) * rapl_power_units, 2772 (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, 2773 (((msr >> 16) & 1) ? 
"EN" : "DIS")); 2774 2775 return; 2776 } 2777 2778 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2779 { 2780 unsigned long long msr; 2781 int cpu; 2782 2783 if (!do_rapl) 2784 return 0; 2785 2786 /* RAPL counters are per package, so print only for 1st thread/package */ 2787 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 2788 return 0; 2789 2790 cpu = t->cpu_id; 2791 if (cpu_migrate(cpu)) { 2792 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 2793 return -1; 2794 } 2795 2796 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) 2797 return -1; 2798 2799 if (debug) { 2800 fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " 2801 "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, 2802 rapl_power_units, rapl_energy_units, rapl_time_units); 2803 } 2804 if (do_rapl & RAPL_PKG_POWER_INFO) { 2805 2806 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) 2807 return -5; 2808 2809 2810 fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", 2811 cpu, msr, 2812 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2813 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2814 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2815 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 2816 2817 } 2818 if (do_rapl & RAPL_PKG) { 2819 2820 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) 2821 return -9; 2822 2823 fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", 2824 cpu, msr, (msr >> 63) & 1 ? "": "UN"); 2825 2826 print_power_limit_msr(cpu, msr, "PKG Limit #1"); 2827 fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", 2828 cpu, 2829 ((msr >> 47) & 1) ? "EN" : "DIS", 2830 ((msr >> 32) & 0x7FFF) * rapl_power_units, 2831 (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, 2832 ((msr >> 48) & 1) ? "EN" : "DIS"); 2833 } 2834 2835 if (do_rapl & RAPL_DRAM_POWER_INFO) { 2836 if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) 2837 return -6; 2838 2839 fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", 2840 cpu, msr, 2841 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2842 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2843 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 2844 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 2845 } 2846 if (do_rapl & RAPL_DRAM) { 2847 if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) 2848 return -9; 2849 fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", 2850 cpu, msr, (msr >> 31) & 1 ? "": "UN"); 2851 2852 print_power_limit_msr(cpu, msr, "DRAM Limit"); 2853 } 2854 if (do_rapl & RAPL_CORE_POLICY) { 2855 if (debug) { 2856 if (get_msr(cpu, MSR_PP0_POLICY, &msr)) 2857 return -7; 2858 2859 fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); 2860 } 2861 } 2862 if (do_rapl & RAPL_CORES) { 2863 if (debug) { 2864 2865 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) 2866 return -9; 2867 fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", 2868 cpu, msr, (msr >> 31) & 1 ? 
"": "UN"); 2869 print_power_limit_msr(cpu, msr, "Cores Limit"); 2870 } 2871 } 2872 if (do_rapl & RAPL_GFX) { 2873 if (debug) { 2874 if (get_msr(cpu, MSR_PP1_POLICY, &msr)) 2875 return -8; 2876 2877 fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); 2878 2879 if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) 2880 return -9; 2881 fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", 2882 cpu, msr, (msr >> 31) & 1 ? "": "UN"); 2883 print_power_limit_msr(cpu, msr, "GFX Limit"); 2884 } 2885 } 2886 return 0; 2887 } 2888 2889 /* 2890 * SNB adds support for additional MSRs: 2891 * 2892 * MSR_PKG_C7_RESIDENCY 0x000003fa 2893 * MSR_CORE_C7_RESIDENCY 0x000003fe 2894 * MSR_PKG_C2_RESIDENCY 0x0000060d 2895 */ 2896 2897 int has_snb_msrs(unsigned int family, unsigned int model) 2898 { 2899 if (!genuine_intel) 2900 return 0; 2901 2902 switch (model) { 2903 case 0x2A: 2904 case 0x2D: 2905 case 0x3A: /* IVB */ 2906 case 0x3E: /* IVB Xeon */ 2907 case 0x3C: /* HSW */ 2908 case 0x3F: /* HSW */ 2909 case 0x45: /* HSW */ 2910 case 0x46: /* HSW */ 2911 case 0x3D: /* BDW */ 2912 case 0x47: /* BDW */ 2913 case 0x4F: /* BDX */ 2914 case 0x56: /* BDX-DE */ 2915 case 0x4E: /* SKL */ 2916 case 0x5E: /* SKL */ 2917 return 1; 2918 } 2919 return 0; 2920 } 2921 2922 /* 2923 * HSW adds support for additional MSRs: 2924 * 2925 * MSR_PKG_C8_RESIDENCY 0x00000630 2926 * MSR_PKG_C9_RESIDENCY 0x00000631 2927 * MSR_PKG_C10_RESIDENCY 0x00000632 2928 * 2929 * MSR_PKGC8_IRTL 0x00000633 2930 * MSR_PKGC9_IRTL 0x00000634 2931 * MSR_PKGC10_IRTL 0x00000635 2932 * 2933 */ 2934 int has_hsw_msrs(unsigned int family, unsigned int model) 2935 { 2936 if (!genuine_intel) 2937 return 0; 2938 2939 switch (model) { 2940 case 0x45: /* HSW */ 2941 case 0x3D: /* BDW */ 2942 case 0x4E: /* SKL */ 2943 case 0x5E: /* SKL */ 2944 return 1; 2945 } 2946 return 0; 2947 } 2948 2949 /* 2950 * SKL adds support for additional MSRS: 2951 * 2952 * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 2953 * MSR_PKG_ANY_CORE_C0_RES 0x00000659 2954 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A 2955 * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B 2956 */ 2957 int has_skl_msrs(unsigned int family, unsigned int model) 2958 { 2959 if (!genuine_intel) 2960 return 0; 2961 2962 switch (model) { 2963 case 0x4E: /* SKL */ 2964 case 0x5E: /* SKL */ 2965 return 1; 2966 } 2967 return 0; 2968 } 2969 2970 2971 2972 int is_slm(unsigned int family, unsigned int model) 2973 { 2974 if (!genuine_intel) 2975 return 0; 2976 switch (model) { 2977 case 0x37: /* BYT */ 2978 case 0x4D: /* AVN */ 2979 return 1; 2980 } 2981 return 0; 2982 } 2983 2984 int is_knl(unsigned int family, unsigned int model) 2985 { 2986 if (!genuine_intel) 2987 return 0; 2988 switch (model) { 2989 case 0x57: /* KNL */ 2990 return 1; 2991 } 2992 return 0; 2993 } 2994 2995 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) 2996 { 2997 if (is_knl(family, model)) 2998 return 1024; 2999 return 1; 3000 } 3001 3002 #define SLM_BCLK_FREQS 5 3003 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 3004 3005 double slm_bclk(void) 3006 { 3007 unsigned long long msr = 3; 3008 unsigned int i; 3009 double freq; 3010 3011 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) 3012 fprintf(outf, "SLM BCLK: unknown\n"); 3013 3014 i = msr & 0xf; 3015 if (i >= SLM_BCLK_FREQS) { 3016 fprintf(outf, "SLM BCLK[%d] invalid\n", i); 3017 msr = 3; 3018 } 3019 freq = slm_freq_table[i]; 3020 3021 fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); 3022 3023 return freq; 3024 } 3025 3026 double discover_bclk(unsigned 
int family, unsigned int model) 3027 { 3028 if (has_snb_msrs(family, model) || is_knl(family, model)) 3029 return 100.00; 3030 else if (is_slm(family, model)) 3031 return slm_bclk(); 3032 else 3033 return 133.33; 3034 } 3035 3036 /* 3037 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where 3038 * the Thermal Control Circuit (TCC) activates. 3039 * This is usually equal to tjMax. 3040 * 3041 * Older processors do not have this MSR, so there we guess, 3042 * but also allow cmdline over-ride with -T. 3043 * 3044 * Several MSR temperature values are in units of degrees-C 3045 * below this value, including the Digital Thermal Sensor (DTS), 3046 * Package Thermal Management Sensor (PTM), and thermal event thresholds. 3047 */ 3048 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3049 { 3050 unsigned long long msr; 3051 unsigned int target_c_local; 3052 int cpu; 3053 3054 /* tcc_activation_temp is used only for dts or ptm */ 3055 if (!(do_dts || do_ptm)) 3056 return 0; 3057 3058 /* this is a per-package concept */ 3059 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 3060 return 0; 3061 3062 cpu = t->cpu_id; 3063 if (cpu_migrate(cpu)) { 3064 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 3065 return -1; 3066 } 3067 3068 if (tcc_activation_temp_override != 0) { 3069 tcc_activation_temp = tcc_activation_temp_override; 3070 fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n", 3071 cpu, tcc_activation_temp); 3072 return 0; 3073 } 3074 3075 /* Temperature Target MSR is Nehalem and newer only */ 3076 if (!do_nhm_platform_info) 3077 goto guess; 3078 3079 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) 3080 goto guess; 3081 3082 target_c_local = (msr >> 16) & 0xFF; 3083 3084 if (debug) 3085 fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", 3086 cpu, msr, target_c_local); 3087 3088 if (!target_c_local) 3089 goto guess; 3090 3091 tcc_activation_temp = target_c_local; 3092 3093 return 0; 3094 3095 guess: 3096 tcc_activation_temp = TJMAX_DEFAULT; 3097 fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", 3098 cpu, tcc_activation_temp); 3099 3100 return 0; 3101 } 3102 3103 void decode_feature_control_msr(void) 3104 { 3105 unsigned long long msr; 3106 3107 if (!get_msr(base_cpu, MSR_IA32_FEATURE_CONTROL, &msr)) 3108 fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n", 3109 base_cpu, msr, 3110 msr & FEATURE_CONTROL_LOCKED ? "" : "UN-", 3111 msr & (1 << 18) ? "SGX" : ""); 3112 } 3113 3114 void decode_misc_enable_msr(void) 3115 { 3116 unsigned long long msr; 3117 3118 if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr)) 3119 fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%s %s %s)\n", 3120 base_cpu, msr, 3121 msr & (1 << 3) ? "TCC" : "", 3122 msr & (1 << 16) ? "EIST" : "", 3123 msr & (1 << 18) ? "MONITOR" : ""); 3124 } 3125 3126 /* 3127 * Decode MSR_MISC_PWR_MGMT 3128 * 3129 * Decode the bits according to the Nehalem documentation 3130 * bit[0] seems to continue to have same meaning going forward 3131 * bit[1] less so... 3132 */ 3133 void decode_misc_pwr_mgmt_msr(void) 3134 { 3135 unsigned long long msr; 3136 3137 if (!do_nhm_platform_info) 3138 return; 3139 3140 if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) 3141 fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB)\n", 3142 base_cpu, msr, 3143 msr & (1 << 0) ? "DIS" : "EN", 3144 msr & (1 << 1) ? 
"EN" : "DIS"); 3145 } 3146 3147 void process_cpuid() 3148 { 3149 unsigned int eax, ebx, ecx, edx, max_level, max_extended_level; 3150 unsigned int fms, family, model, stepping; 3151 3152 eax = ebx = ecx = edx = 0; 3153 3154 __cpuid(0, max_level, ebx, ecx, edx); 3155 3156 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 3157 genuine_intel = 1; 3158 3159 if (debug) 3160 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 3161 (char *)&ebx, (char *)&edx, (char *)&ecx); 3162 3163 __cpuid(1, fms, ebx, ecx, edx); 3164 family = (fms >> 8) & 0xf; 3165 model = (fms >> 4) & 0xf; 3166 stepping = fms & 0xf; 3167 if (family == 6 || family == 0xf) 3168 model += ((fms >> 16) & 0xf) << 4; 3169 3170 if (debug) { 3171 fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", 3172 max_level, family, model, stepping, family, model, stepping); 3173 fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n", 3174 ecx & (1 << 0) ? "SSE3" : "-", 3175 ecx & (1 << 3) ? "MONITOR" : "-", 3176 ecx & (1 << 6) ? "SMX" : "-", 3177 ecx & (1 << 7) ? "EIST" : "-", 3178 ecx & (1 << 8) ? "TM2" : "-", 3179 edx & (1 << 4) ? "TSC" : "-", 3180 edx & (1 << 5) ? "MSR" : "-", 3181 edx & (1 << 22) ? "ACPI-TM" : "-", 3182 edx & (1 << 29) ? "TM" : "-"); 3183 } 3184 3185 if (!(edx & (1 << 5))) 3186 errx(1, "CPUID: no MSR"); 3187 3188 /* 3189 * check max extended function levels of CPUID. 3190 * This is needed to check for invariant TSC. 3191 * This check is valid for both Intel and AMD. 3192 */ 3193 ebx = ecx = edx = 0; 3194 __cpuid(0x80000000, max_extended_level, ebx, ecx, edx); 3195 3196 if (max_extended_level >= 0x80000007) { 3197 3198 /* 3199 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 3200 * this check is valid for both Intel and AMD 3201 */ 3202 __cpuid(0x80000007, eax, ebx, ecx, edx); 3203 has_invariant_tsc = edx & (1 << 8); 3204 } 3205 3206 /* 3207 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 3208 * this check is valid for both Intel and AMD 3209 */ 3210 3211 __cpuid(0x6, eax, ebx, ecx, edx); 3212 has_aperf = ecx & (1 << 0); 3213 do_dts = eax & (1 << 0); 3214 do_ptm = eax & (1 << 6); 3215 has_hwp = eax & (1 << 7); 3216 has_hwp_notify = eax & (1 << 8); 3217 has_hwp_activity_window = eax & (1 << 9); 3218 has_hwp_epp = eax & (1 << 10); 3219 has_hwp_pkg = eax & (1 << 11); 3220 has_epb = ecx & (1 << 3); 3221 3222 if (debug) 3223 fprintf(outf, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sHWP, " 3224 "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", 3225 has_aperf ? "" : "No-", 3226 do_dts ? "" : "No-", 3227 do_ptm ? "" : "No-", 3228 has_hwp ? "" : "No-", 3229 has_hwp_notify ? "" : "No-", 3230 has_hwp_activity_window ? "" : "No-", 3231 has_hwp_epp ? "" : "No-", 3232 has_hwp_pkg ? "" : "No-", 3233 has_epb ? "" : "No-"); 3234 3235 if (debug) 3236 decode_misc_enable_msr(); 3237 3238 if (max_level >= 0x7 && debug) { 3239 int has_sgx; 3240 3241 ecx = 0; 3242 3243 __cpuid_count(0x7, 0, eax, ebx, ecx, edx); 3244 3245 has_sgx = ebx & (1 << 2); 3246 fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? 
"" : "No-"); 3247 3248 if (has_sgx) 3249 decode_feature_control_msr(); 3250 } 3251 3252 if (max_level >= 0x15) { 3253 unsigned int eax_crystal; 3254 unsigned int ebx_tsc; 3255 3256 /* 3257 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz 3258 */ 3259 eax_crystal = ebx_tsc = crystal_hz = edx = 0; 3260 __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx); 3261 3262 if (ebx_tsc != 0) { 3263 3264 if (debug && (ebx != 0)) 3265 fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", 3266 eax_crystal, ebx_tsc, crystal_hz); 3267 3268 if (crystal_hz == 0) 3269 switch(model) { 3270 case 0x4E: /* SKL */ 3271 case 0x5E: /* SKL */ 3272 crystal_hz = 24000000; /* 24 MHz */ 3273 break; 3274 default: 3275 crystal_hz = 0; 3276 } 3277 3278 if (crystal_hz) { 3279 tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; 3280 if (debug) 3281 fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", 3282 tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); 3283 } 3284 } 3285 } 3286 if (max_level >= 0x16) { 3287 unsigned int base_mhz, max_mhz, bus_mhz, edx; 3288 3289 /* 3290 * CPUID 16H Base MHz, Max MHz, Bus MHz 3291 */ 3292 base_mhz = max_mhz = bus_mhz = edx = 0; 3293 3294 __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx); 3295 if (debug) 3296 fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", 3297 base_mhz, max_mhz, bus_mhz); 3298 } 3299 3300 if (has_aperf) 3301 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); 3302 3303 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); 3304 do_snb_cstates = has_snb_msrs(family, model); 3305 do_irtl_snb = has_snb_msrs(family, model); 3306 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); 3307 do_pc3 = (pkg_cstate_limit >= PCL__3); 3308 do_pc6 = (pkg_cstate_limit >= PCL__6); 3309 do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); 3310 do_c8_c9_c10 = has_hsw_msrs(family, model); 3311 do_irtl_hsw = has_hsw_msrs(family, model); 3312 do_skl_residency = has_skl_msrs(family, model); 3313 do_slm_cstates = is_slm(family, model); 3314 do_knl_cstates = is_knl(family, model); 3315 3316 if (debug) 3317 decode_misc_pwr_mgmt_msr(); 3318 3319 rapl_probe(family, model); 3320 perf_limit_reasons_probe(family, model); 3321 3322 if (debug) 3323 dump_cstate_pstate_config_info(family, model); 3324 3325 if (has_skl_msrs(family, model)) 3326 calculate_tsc_tweak(); 3327 3328 do_gfx_rc6_ms = !access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK); 3329 3330 do_gfx_mhz = !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK); 3331 3332 return; 3333 } 3334 3335 void help() 3336 { 3337 fprintf(outf, 3338 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" 3339 "\n" 3340 "Turbostat forks the specified COMMAND and prints statistics\n" 3341 "when COMMAND completes.\n" 3342 "If no COMMAND is specified, turbostat wakes every 5-seconds\n" 3343 "to print statistics, until interrupted.\n" 3344 "--debug run in \"debug\" mode\n" 3345 "--interval sec Override default 5-second measurement interval\n" 3346 "--help print this help message\n" 3347 "--counter msr print 32-bit counter at address \"msr\"\n" 3348 "--Counter msr print 64-bit Counter at address \"msr\"\n" 3349 "--out file create or truncate \"file\" for all output\n" 3350 "--msr msr print 32-bit value at address \"msr\"\n" 3351 "--MSR msr print 64-bit Value at address \"msr\"\n" 3352 "--version print version information\n" 3353 "\n" 3354 "For more help, run \"man turbostat\"\n"); 3355 } 3356 3357 3358 /* 3359 * 
in /dev/cpu/ return success for names that are numbers 3360 * ie. filter out ".", "..", "microcode". 3361 */ 3362 int dir_filter(const struct dirent *dirp) 3363 { 3364 if (isdigit(dirp->d_name[0])) 3365 return 1; 3366 else 3367 return 0; 3368 } 3369 3370 int open_dev_cpu_msr(int dummy1) 3371 { 3372 return 0; 3373 } 3374 3375 void topology_probe() 3376 { 3377 int i; 3378 int max_core_id = 0; 3379 int max_package_id = 0; 3380 int max_siblings = 0; 3381 struct cpu_topology { 3382 int core_id; 3383 int physical_package_id; 3384 } *cpus; 3385 3386 /* Initialize num_cpus, max_cpu_num */ 3387 topo.num_cpus = 0; 3388 topo.max_cpu_num = 0; 3389 for_all_proc_cpus(count_cpus); 3390 if (!summary_only && topo.num_cpus > 1) 3391 show_cpu = 1; 3392 3393 if (debug > 1) 3394 fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); 3395 3396 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); 3397 if (cpus == NULL) 3398 err(1, "calloc cpus"); 3399 3400 /* 3401 * Allocate and initialize cpu_present_set 3402 */ 3403 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); 3404 if (cpu_present_set == NULL) 3405 err(3, "CPU_ALLOC"); 3406 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 3407 CPU_ZERO_S(cpu_present_setsize, cpu_present_set); 3408 for_all_proc_cpus(mark_cpu_present); 3409 3410 /* 3411 * Allocate and initialize cpu_affinity_set 3412 */ 3413 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); 3414 if (cpu_affinity_set == NULL) 3415 err(3, "CPU_ALLOC"); 3416 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 3417 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 3418 3419 3420 /* 3421 * For online cpus 3422 * find max_core_id, max_package_id 3423 */ 3424 for (i = 0; i <= topo.max_cpu_num; ++i) { 3425 int siblings; 3426 3427 if (cpu_is_not_present(i)) { 3428 if (debug > 1) 3429 fprintf(outf, "cpu%d NOT PRESENT\n", i); 3430 continue; 3431 } 3432 cpus[i].core_id = get_core_id(i); 3433 if (cpus[i].core_id > max_core_id) 3434 max_core_id = cpus[i].core_id; 3435 3436 cpus[i].physical_package_id = get_physical_package_id(i); 3437 if (cpus[i].physical_package_id > max_package_id) 3438 max_package_id = cpus[i].physical_package_id; 3439 3440 siblings = get_num_ht_siblings(i); 3441 if (siblings > max_siblings) 3442 max_siblings = siblings; 3443 if (debug > 1) 3444 fprintf(outf, "cpu %d pkg %d core %d\n", 3445 i, cpus[i].physical_package_id, cpus[i].core_id); 3446 } 3447 topo.num_cores_per_pkg = max_core_id + 1; 3448 if (debug > 1) 3449 fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", 3450 max_core_id, topo.num_cores_per_pkg); 3451 if (debug && !summary_only && topo.num_cores_per_pkg > 1) 3452 show_core = 1; 3453 3454 topo.num_packages = max_package_id + 1; 3455 if (debug > 1) 3456 fprintf(outf, "max_package_id %d, sizing for %d packages\n", 3457 max_package_id, topo.num_packages); 3458 if (debug && !summary_only && topo.num_packages > 1) 3459 show_pkg = 1; 3460 3461 topo.num_threads_per_core = max_siblings; 3462 if (debug > 1) 3463 fprintf(outf, "max_siblings %d\n", max_siblings); 3464 3465 free(cpus); 3466 } 3467 3468 void 3469 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) 3470 { 3471 int i; 3472 3473 *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * 3474 topo.num_packages, sizeof(struct thread_data)); 3475 if (*t == NULL) 3476 goto error; 3477 3478 for (i = 0; i < topo.num_threads_per_core * 3479 topo.num_cores_per_pkg * topo.num_packages; i++) 3480 (*t)[i].cpu_id = -1; 
3481 3482 *c = calloc(topo.num_cores_per_pkg * topo.num_packages, 3483 sizeof(struct core_data)); 3484 if (*c == NULL) 3485 goto error; 3486 3487 for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) 3488 (*c)[i].core_id = -1; 3489 3490 *p = calloc(topo.num_packages, sizeof(struct pkg_data)); 3491 if (*p == NULL) 3492 goto error; 3493 3494 for (i = 0; i < topo.num_packages; i++) 3495 (*p)[i].package_id = i; 3496 3497 return; 3498 error: 3499 err(1, "calloc counters"); 3500 } 3501 /* 3502 * init_counter() 3503 * 3504 * set cpu_id, core_num, pkg_num 3505 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE 3506 * 3507 * increment topo.num_cores when 1st core in pkg seen 3508 */ 3509 void init_counter(struct thread_data *thread_base, struct core_data *core_base, 3510 struct pkg_data *pkg_base, int thread_num, int core_num, 3511 int pkg_num, int cpu_id) 3512 { 3513 struct thread_data *t; 3514 struct core_data *c; 3515 struct pkg_data *p; 3516 3517 t = GET_THREAD(thread_base, thread_num, core_num, pkg_num); 3518 c = GET_CORE(core_base, core_num, pkg_num); 3519 p = GET_PKG(pkg_base, pkg_num); 3520 3521 t->cpu_id = cpu_id; 3522 if (thread_num == 0) { 3523 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; 3524 if (cpu_is_first_core_in_package(cpu_id)) 3525 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; 3526 } 3527 3528 c->core_id = core_num; 3529 p->package_id = pkg_num; 3530 } 3531 3532 3533 int initialize_counters(int cpu_id) 3534 { 3535 int my_thread_id, my_core_id, my_package_id; 3536 3537 my_package_id = get_physical_package_id(cpu_id); 3538 my_core_id = get_core_id(cpu_id); 3539 my_thread_id = get_cpu_position_in_core(cpu_id); 3540 if (!my_thread_id) 3541 topo.num_cores++; 3542 3543 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 3544 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 3545 return 0; 3546 } 3547 3548 void allocate_output_buffer() 3549 { 3550 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 3551 outp = output_buffer; 3552 if (outp == NULL) 3553 err(-1, "calloc output buffer"); 3554 } 3555 void allocate_fd_percpu(void) 3556 { 3557 fd_percpu = calloc(topo.max_cpu_num, sizeof(int)); 3558 if (fd_percpu == NULL) 3559 err(-1, "calloc fd_percpu"); 3560 } 3561 void allocate_irq_buffers(void) 3562 { 3563 irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int)); 3564 if (irq_column_2_cpu == NULL) 3565 err(-1, "calloc %d", topo.num_cpus); 3566 3567 irqs_per_cpu = calloc(topo.max_cpu_num, sizeof(int)); 3568 if (irqs_per_cpu == NULL) 3569 err(-1, "calloc %d", topo.max_cpu_num); 3570 } 3571 void setup_all_buffers(void) 3572 { 3573 topology_probe(); 3574 allocate_irq_buffers(); 3575 allocate_fd_percpu(); 3576 allocate_counters(&thread_even, &core_even, &package_even); 3577 allocate_counters(&thread_odd, &core_odd, &package_odd); 3578 allocate_output_buffer(); 3579 for_all_proc_cpus(initialize_counters); 3580 } 3581 3582 void set_base_cpu(void) 3583 { 3584 base_cpu = sched_getcpu(); 3585 if (base_cpu < 0) 3586 err(-ENODEV, "No valid cpus found"); 3587 3588 if (debug > 1) 3589 fprintf(outf, "base_cpu = %d\n", base_cpu); 3590 } 3591 3592 void turbostat_init() 3593 { 3594 setup_all_buffers(); 3595 set_base_cpu(); 3596 check_dev_msr(); 3597 check_permissions(); 3598 process_cpuid(); 3599 3600 3601 if (debug) 3602 for_all_cpus(print_hwp, ODD_COUNTERS); 3603 3604 if (debug) 3605 for_all_cpus(print_epb, ODD_COUNTERS); 3606 3607 if (debug) 3608 for_all_cpus(print_perf_limit, ODD_COUNTERS); 3609 3610 if (debug) 3611 for_all_cpus(print_rapl, 
ODD_COUNTERS); 3612 3613 for_all_cpus(set_temperature_target, ODD_COUNTERS); 3614 3615 if (debug) 3616 for_all_cpus(print_thermal, ODD_COUNTERS); 3617 3618 if (debug && do_irtl_snb) 3619 print_irtl(); 3620 } 3621 3622 int fork_it(char **argv) 3623 { 3624 pid_t child_pid; 3625 int status; 3626 3627 status = for_all_cpus(get_counters, EVEN_COUNTERS); 3628 if (status) 3629 exit(status); 3630 /* clear affinity side-effect of get_counters() */ 3631 sched_setaffinity(0, cpu_present_setsize, cpu_present_set); 3632 gettimeofday(&tv_even, (struct timezone *)NULL); 3633 3634 child_pid = fork(); 3635 if (!child_pid) { 3636 /* child */ 3637 execvp(argv[0], argv); 3638 } else { 3639 3640 /* parent */ 3641 if (child_pid == -1) 3642 err(1, "fork"); 3643 3644 signal(SIGINT, SIG_IGN); 3645 signal(SIGQUIT, SIG_IGN); 3646 if (waitpid(child_pid, &status, 0) == -1) 3647 err(status, "waitpid"); 3648 } 3649 /* 3650 * n.b. fork_it() does not check for errors from for_all_cpus() 3651 * because re-starting is problematic when forking 3652 */ 3653 for_all_cpus(get_counters, ODD_COUNTERS); 3654 gettimeofday(&tv_odd, (struct timezone *)NULL); 3655 timersub(&tv_odd, &tv_even, &tv_delta); 3656 for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS); 3657 compute_average(EVEN_COUNTERS); 3658 format_all_counters(EVEN_COUNTERS); 3659 3660 fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); 3661 3662 flush_output_stderr(); 3663 3664 return status; 3665 } 3666 3667 int get_and_dump_counters(void) 3668 { 3669 int status; 3670 3671 status = for_all_cpus(get_counters, ODD_COUNTERS); 3672 if (status) 3673 return status; 3674 3675 status = for_all_cpus(dump_counters, ODD_COUNTERS); 3676 if (status) 3677 return status; 3678 3679 flush_output_stdout(); 3680 3681 return status; 3682 } 3683 3684 void print_version() { 3685 fprintf(outf, "turbostat version 4.11 27 Feb 2016" 3686 " - Len Brown <lenb@kernel.org>\n"); 3687 } 3688 3689 void cmdline(int argc, char **argv) 3690 { 3691 int opt; 3692 int option_index = 0; 3693 static struct option long_options[] = { 3694 {"Counter", required_argument, 0, 'C'}, 3695 {"counter", required_argument, 0, 'c'}, 3696 {"Dump", no_argument, 0, 'D'}, 3697 {"debug", no_argument, 0, 'd'}, 3698 {"interval", required_argument, 0, 'i'}, 3699 {"help", no_argument, 0, 'h'}, 3700 {"Joules", no_argument, 0, 'J'}, 3701 {"MSR", required_argument, 0, 'M'}, 3702 {"msr", required_argument, 0, 'm'}, 3703 {"out", required_argument, 0, 'o'}, 3704 {"Package", no_argument, 0, 'p'}, 3705 {"processor", no_argument, 0, 'p'}, 3706 {"Summary", no_argument, 0, 'S'}, 3707 {"TCC", required_argument, 0, 'T'}, 3708 {"version", no_argument, 0, 'v' }, 3709 {0, 0, 0, 0 } 3710 }; 3711 3712 progname = argv[0]; 3713 3714 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:PpST:v", 3715 long_options, &option_index)) != -1) { 3716 switch (opt) { 3717 case 'C': 3718 sscanf(optarg, "%x", &extra_delta_offset64); 3719 break; 3720 case 'c': 3721 sscanf(optarg, "%x", &extra_delta_offset32); 3722 break; 3723 case 'D': 3724 dump_only++; 3725 break; 3726 case 'd': 3727 debug++; 3728 break; 3729 case 'h': 3730 default: 3731 help(); 3732 exit(1); 3733 case 'i': 3734 { 3735 double interval = strtod(optarg, NULL); 3736 3737 if (interval < 0.001) { 3738 fprintf(outf, "interval %f seconds is too small\n", 3739 interval); 3740 exit(2); 3741 } 3742 3743 interval_ts.tv_sec = interval; 3744 interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000; 3745 } 3746 break; 3747 case 'J': 3748 rapl_joules++; 3749 break; 3750 
case 'M': 3751 sscanf(optarg, "%x", &extra_msr_offset64); 3752 break; 3753 case 'm': 3754 sscanf(optarg, "%x", &extra_msr_offset32); 3755 break; 3756 case 'o': 3757 outf = fopen_or_die(optarg, "w"); 3758 break; 3759 case 'P': 3760 show_pkg_only++; 3761 break; 3762 case 'p': 3763 show_core_only++; 3764 break; 3765 case 'S': 3766 summary_only++; 3767 break; 3768 case 'T': 3769 tcc_activation_temp_override = atoi(optarg); 3770 break; 3771 case 'v': 3772 print_version(); 3773 exit(0); 3774 break; 3775 } 3776 } 3777 } 3778 3779 int main(int argc, char **argv) 3780 { 3781 outf = stderr; 3782 3783 cmdline(argc, argv); 3784 3785 if (debug) 3786 print_version(); 3787 3788 turbostat_init(); 3789 3790 /* dump counters and exit */ 3791 if (dump_only) 3792 return get_and_dump_counters(); 3793 3794 /* 3795 * if any params left, it must be a command to fork 3796 */ 3797 if (argc - optind) 3798 return fork_it(argv + optind); 3799 else 3800 turbostat_loop(); 3801 3802 return 0; 3803 } 3804
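/*
 * Illustration only, not compiled: a minimal sketch of the
 * /dev/cpu/<n>/msr interface that the MSR accesses in this file rely
 * on -- open the per-cpu msr device and pread() 8 bytes using the MSR
 * address as the file offset.  The helper name and return values here
 * are hypothetical, not part of turbostat.
 */
#if 0
static int example_read_msr(int cpu, unsigned int offset, unsigned long long *val)
{
	char path[32];
	int fd;

	sprintf(path, "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* one 64-bit MSR is read per pread(), at offset == MSR address */
	if (pread(fd, val, sizeof(*val), offset) != sizeof(*val)) {
		close(fd);
		return -2;
	}

	close(fd);
	return 0;
}
#endif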