1 /* 2 * turbostat -- show CPU frequency and C-state residency 3 * on modern Intel turbo-capable processors. 4 * 5 * Copyright (c) 2013 Intel Corporation. 6 * Len Brown <len.brown@intel.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms and conditions of the GNU General Public License, 10 * version 2, as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 * more details. 16 * 17 * You should have received a copy of the GNU General Public License along with 18 * this program; if not, write to the Free Software Foundation, Inc., 19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 */ 21 22 #define _GNU_SOURCE 23 #include MSRHEADER 24 #include INTEL_FAMILY_HEADER 25 #include <stdarg.h> 26 #include <stdio.h> 27 #include <err.h> 28 #include <unistd.h> 29 #include <sys/types.h> 30 #include <sys/wait.h> 31 #include <sys/stat.h> 32 #include <sys/select.h> 33 #include <sys/resource.h> 34 #include <fcntl.h> 35 #include <signal.h> 36 #include <sys/time.h> 37 #include <stdlib.h> 38 #include <getopt.h> 39 #include <dirent.h> 40 #include <string.h> 41 #include <ctype.h> 42 #include <sched.h> 43 #include <time.h> 44 #include <cpuid.h> 45 #include <linux/capability.h> 46 #include <errno.h> 47 48 char *proc_stat = "/proc/stat"; 49 FILE *outf; 50 int *fd_percpu; 51 struct timeval interval_tv = {5, 0}; 52 struct timespec interval_ts = {5, 0}; 53 struct timespec one_msec = {0, 1000000}; 54 unsigned int num_iterations; 55 unsigned int debug; 56 unsigned int quiet; 57 unsigned int shown; 58 unsigned int sums_need_wide_columns; 59 unsigned int rapl_joules; 60 unsigned int summary_only; 61 unsigned int list_header_only; 62 unsigned int dump_only; 63 unsigned int do_snb_cstates; 64 unsigned int do_knl_cstates; 65 unsigned int do_slm_cstates; 66 unsigned int do_cnl_cstates; 67 unsigned int use_c1_residency_msr; 68 unsigned int has_aperf; 69 unsigned int has_epb; 70 unsigned int do_irtl_snb; 71 unsigned int do_irtl_hsw; 72 unsigned int units = 1000000; /* MHz etc */ 73 unsigned int genuine_intel; 74 unsigned int has_invariant_tsc; 75 unsigned int do_nhm_platform_info; 76 unsigned int no_MSR_MISC_PWR_MGMT; 77 unsigned int aperf_mperf_multiplier = 1; 78 double bclk; 79 double base_hz; 80 unsigned int has_base_hz; 81 double tsc_tweak = 1.0; 82 unsigned int show_pkg_only; 83 unsigned int show_core_only; 84 char *output_buffer, *outp; 85 unsigned int do_rapl; 86 unsigned int do_dts; 87 unsigned int do_ptm; 88 unsigned long long gfx_cur_rc6_ms; 89 unsigned long long cpuidle_cur_cpu_lpi_us; 90 unsigned long long cpuidle_cur_sys_lpi_us; 91 unsigned int gfx_cur_mhz; 92 unsigned int tcc_activation_temp; 93 unsigned int tcc_activation_temp_override; 94 double rapl_power_units, rapl_time_units; 95 double rapl_dram_energy_units, rapl_energy_units; 96 double rapl_joule_counter_range; 97 unsigned int do_core_perf_limit_reasons; 98 unsigned int has_automatic_cstate_conversion; 99 unsigned int do_gfx_perf_limit_reasons; 100 unsigned int do_ring_perf_limit_reasons; 101 unsigned int crystal_hz; 102 unsigned long long tsc_hz; 103 int base_cpu; 104 double discover_bclk(unsigned int family, unsigned int model); 105 unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ 106 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ 107 unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ 108 unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ 109 unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ 110 unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 111 unsigned int has_misc_feature_control; 112 unsigned int first_counter_read = 1; 113 114 #define RAPL_PKG (1 << 0) 115 /* 0x610 MSR_PKG_POWER_LIMIT */ 116 /* 0x611 MSR_PKG_ENERGY_STATUS */ 117 #define RAPL_PKG_PERF_STATUS (1 << 1) 118 /* 0x613 MSR_PKG_PERF_STATUS */ 119 #define RAPL_PKG_POWER_INFO (1 << 2) 120 /* 0x614 MSR_PKG_POWER_INFO */ 121 122 #define RAPL_DRAM (1 << 3) 123 /* 0x618 MSR_DRAM_POWER_LIMIT */ 124 /* 0x619 MSR_DRAM_ENERGY_STATUS */ 125 #define RAPL_DRAM_PERF_STATUS (1 << 4) 126 /* 0x61b MSR_DRAM_PERF_STATUS */ 127 #define RAPL_DRAM_POWER_INFO (1 << 5) 128 /* 0x61c MSR_DRAM_POWER_INFO */ 129 130 #define RAPL_CORES_POWER_LIMIT (1 << 6) 131 /* 0x638 MSR_PP0_POWER_LIMIT */ 132 #define RAPL_CORE_POLICY (1 << 7) 133 /* 0x63a MSR_PP0_POLICY */ 134 135 #define RAPL_GFX (1 << 8) 136 /* 0x640 MSR_PP1_POWER_LIMIT */ 137 /* 0x641 MSR_PP1_ENERGY_STATUS */ 138 /* 0x642 MSR_PP1_POLICY */ 139 140 #define RAPL_CORES_ENERGY_STATUS (1 << 9) 141 /* 0x639 MSR_PP0_ENERGY_STATUS */ 142 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) 143 #define TJMAX_DEFAULT 100 144 145 #define MAX(a, b) ((a) > (b) ? (a) : (b)) 146 147 /* 148 * buffer size used by sscanf() for added column names 149 * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters 150 */ 151 #define NAME_BYTES 20 152 #define PATH_BYTES 128 153 154 int backwards_count; 155 char *progname; 156 157 #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */ 158 cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset; 159 size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; 160 #define MAX_ADDED_COUNTERS 8 161 #define MAX_ADDED_THREAD_COUNTERS 24 162 #define BITMASK_SIZE 32 163 164 struct thread_data { 165 struct timeval tv_begin; 166 struct timeval tv_end; 167 unsigned long long tsc; 168 unsigned long long aperf; 169 unsigned long long mperf; 170 unsigned long long c1; 171 unsigned long long irq_count; 172 unsigned int smi_count; 173 unsigned int cpu_id; 174 unsigned int apic_id; 175 unsigned int x2apic_id; 176 unsigned int flags; 177 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 178 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 179 unsigned long long counter[MAX_ADDED_THREAD_COUNTERS]; 180 } *thread_even, *thread_odd; 181 182 struct core_data { 183 unsigned long long c3; 184 unsigned long long c6; 185 unsigned long long c7; 186 unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ 187 unsigned int core_temp_c; 188 unsigned int core_id; 189 unsigned long long counter[MAX_ADDED_COUNTERS]; 190 } *core_even, *core_odd; 191 192 struct pkg_data { 193 unsigned long long pc2; 194 unsigned long long pc3; 195 unsigned long long pc6; 196 unsigned long long pc7; 197 unsigned long long pc8; 198 unsigned long long pc9; 199 unsigned long long pc10; 200 unsigned long long cpu_lpi; 201 unsigned long long sys_lpi; 202 unsigned long long pkg_wtd_core_c0; 203 unsigned long long pkg_any_core_c0; 204 unsigned long long pkg_any_gfxe_c0; 205 unsigned long long pkg_both_core_gfxe_c0; 206 long long gfx_rc6_ms; 207 unsigned int gfx_mhz; 208 unsigned int package_id; 209 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ 210 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ 211 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ 212 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ 213 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ 214 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ 215 unsigned int pkg_temp_c; 216 unsigned long long counter[MAX_ADDED_COUNTERS]; 217 } *package_even, *package_odd; 218 219 #define ODD_COUNTERS thread_odd, core_odd, package_odd 220 #define EVEN_COUNTERS thread_even, core_even, package_even 221 222 #define GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no) \ 223 ((thread_base) + \ 224 ((pkg_no) * \ 225 topo.nodes_per_pkg * topo.cores_per_node * topo.threads_per_core) + \ 226 ((node_no) * topo.cores_per_node * topo.threads_per_core) + \ 227 ((core_no) * topo.threads_per_core) + \ 228 (thread_no)) 229 230 #define GET_CORE(core_base, core_no, node_no, pkg_no) \ 231 ((core_base) + \ 232 ((pkg_no) * topo.nodes_per_pkg * topo.cores_per_node) + \ 233 ((node_no) * topo.cores_per_node) + \ 234 (core_no)) 235 236 237 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) 238 239 enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE}; 240 enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC}; 241 enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT}; 242 243 struct msr_counter { 244 unsigned int msr_num; 245 char name[NAME_BYTES]; 246 char path[PATH_BYTES]; 247 unsigned int width; 248 enum counter_type type; 249 enum counter_format format; 250 struct msr_counter *next; 251 unsigned int flags; 252 #define FLAGS_HIDE (1 << 0) 253 #define FLAGS_SHOW (1 << 1) 254 #define SYSFS_PERCPU (1 << 1) 255 }; 256 257 struct sys_counters { 258 unsigned int added_thread_counters; 259 unsigned int added_core_counters; 260 unsigned int added_package_counters; 261 struct msr_counter *tp; 262 struct msr_counter *cp; 263 struct msr_counter *pp; 264 } sys; 265 266 struct system_summary { 267 struct thread_data threads; 268 struct core_data cores; 269 struct pkg_data packages; 270 } average; 271 272 struct cpu_topology { 273 int physical_package_id; 274 int logical_cpu_id; 275 int physical_node_id; 276 int logical_node_id; /* 0-based count within the package */ 277 int physical_core_id; 278 int thread_id; 279 cpu_set_t *put_ids; /* Processing Unit/Thread IDs */ 280 } *cpus; 281 282 struct topo_params { 283 int num_packages; 284 int num_cpus; 285 int num_cores; 286 int max_cpu_num; 287 int max_node_num; 288 int nodes_per_pkg; 289 int cores_per_node; 290 int threads_per_core; 291 } topo; 292 293 struct timeval tv_even, tv_odd, tv_delta; 294 295 int *irq_column_2_cpu; /* /proc/interrupts column numbers */ 296 int *irqs_per_cpu; /* indexed by cpu_num */ 297 298 void setup_all_buffers(void); 299 300 int cpu_is_not_present(int cpu) 301 { 302 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); 303 } 304 /* 305 * run func(thread, core, package) in topology order 306 * skip non-present cpus 307 */ 308 309 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), 310 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) 311 { 312 int retval, pkg_no, core_no, thread_no, node_no; 313 314 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 315 for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { 316 for (node_no = 0; node_no < topo.nodes_per_pkg; 317 node_no++) { 318 for (thread_no = 0; thread_no < 319 topo.threads_per_core; ++thread_no) { 320 struct thread_data *t; 321 struct core_data *c; 322 struct pkg_data *p; 323 324 t = GET_THREAD(thread_base, thread_no, 325 core_no, node_no, 326 pkg_no); 327 328 if (cpu_is_not_present(t->cpu_id)) 329 continue; 330 331 c = GET_CORE(core_base, core_no, 332 node_no, pkg_no); 333 p = GET_PKG(pkg_base, pkg_no); 334 335 retval = func(t, c, p); 336 if (retval) 337 return retval; 338 } 339 } 340 } 341 } 342 return 0; 343 } 344 345 int cpu_migrate(int cpu) 346 { 347 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 348 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); 349 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) 350 return -1; 351 else 352 return 0; 353 } 354 int get_msr_fd(int cpu) 355 { 356 char pathname[32]; 357 int fd; 358 359 fd = fd_percpu[cpu]; 360 361 if (fd) 362 return fd; 363 364 sprintf(pathname, "/dev/cpu/%d/msr", cpu); 365 fd = open(pathname, O_RDONLY); 366 if (fd < 0) 367 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname); 368 369 fd_percpu[cpu] = fd; 370 371 return fd; 372 } 373 374 int get_msr(int cpu, off_t offset, unsigned long long *msr) 375 { 376 ssize_t retval; 377 378 retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset); 379 380 if (retval != sizeof *msr) 381 err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset); 382 383 return 0; 384 } 385 386 /* 387 * This list matches the column headers, except 388 * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time 389 * 2. Core and CPU are moved to the end, we can't have strings that contain them 390 * matching on them for --show and --hide. 391 */ 392 struct msr_counter bic[] = { 393 { 0x0, "usec" }, 394 { 0x0, "Time_Of_Day_Seconds" }, 395 { 0x0, "Package" }, 396 { 0x0, "Node" }, 397 { 0x0, "Avg_MHz" }, 398 { 0x0, "Busy%" }, 399 { 0x0, "Bzy_MHz" }, 400 { 0x0, "TSC_MHz" }, 401 { 0x0, "IRQ" }, 402 { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, 403 { 0x0, "sysfs" }, 404 { 0x0, "CPU%c1" }, 405 { 0x0, "CPU%c3" }, 406 { 0x0, "CPU%c6" }, 407 { 0x0, "CPU%c7" }, 408 { 0x0, "ThreadC" }, 409 { 0x0, "CoreTmp" }, 410 { 0x0, "CoreCnt" }, 411 { 0x0, "PkgTmp" }, 412 { 0x0, "GFX%rc6" }, 413 { 0x0, "GFXMHz" }, 414 { 0x0, "Pkg%pc2" }, 415 { 0x0, "Pkg%pc3" }, 416 { 0x0, "Pkg%pc6" }, 417 { 0x0, "Pkg%pc7" }, 418 { 0x0, "Pkg%pc8" }, 419 { 0x0, "Pkg%pc9" }, 420 { 0x0, "Pk%pc10" }, 421 { 0x0, "CPU%LPI" }, 422 { 0x0, "SYS%LPI" }, 423 { 0x0, "PkgWatt" }, 424 { 0x0, "CorWatt" }, 425 { 0x0, "GFXWatt" }, 426 { 0x0, "PkgCnt" }, 427 { 0x0, "RAMWatt" }, 428 { 0x0, "PKG_%" }, 429 { 0x0, "RAM_%" }, 430 { 0x0, "Pkg_J" }, 431 { 0x0, "Cor_J" }, 432 { 0x0, "GFX_J" }, 433 { 0x0, "RAM_J" }, 434 { 0x0, "Mod%c6" }, 435 { 0x0, "Totl%C0" }, 436 { 0x0, "Any%C0" }, 437 { 0x0, "GFX%C0" }, 438 { 0x0, "CPUGFX%" }, 439 { 0x0, "Core" }, 440 { 0x0, "CPU" }, 441 { 0x0, "APIC" }, 442 { 0x0, "X2APIC" }, 443 }; 444 445 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) 446 #define BIC_USEC (1ULL << 0) 447 #define BIC_TOD (1ULL << 1) 448 #define BIC_Package (1ULL << 2) 449 #define BIC_Node (1ULL << 3) 450 #define BIC_Avg_MHz (1ULL << 4) 451 #define BIC_Busy (1ULL << 5) 452 #define BIC_Bzy_MHz (1ULL << 6) 453 #define BIC_TSC_MHz (1ULL << 7) 454 #define BIC_IRQ (1ULL << 8) 455 #define BIC_SMI (1ULL << 9) 456 #define BIC_sysfs (1ULL << 10) 457 #define BIC_CPU_c1 (1ULL << 11) 458 #define BIC_CPU_c3 (1ULL << 12) 459 #define BIC_CPU_c6 (1ULL << 13) 460 #define BIC_CPU_c7 (1ULL << 14) 461 #define BIC_ThreadC (1ULL << 15) 462 #define BIC_CoreTmp (1ULL << 16) 463 #define BIC_CoreCnt (1ULL << 17) 464 #define BIC_PkgTmp (1ULL << 18) 465 #define BIC_GFX_rc6 (1ULL << 19) 466 #define BIC_GFXMHz (1ULL << 20) 467 #define BIC_Pkgpc2 (1ULL << 21) 468 #define BIC_Pkgpc3 (1ULL << 22) 469 #define BIC_Pkgpc6 (1ULL << 23) 470 #define BIC_Pkgpc7 (1ULL << 24) 471 #define BIC_Pkgpc8 (1ULL << 25) 472 #define BIC_Pkgpc9 (1ULL << 26) 473 #define BIC_Pkgpc10 (1ULL << 27) 474 #define BIC_CPU_LPI (1ULL << 28) 475 #define BIC_SYS_LPI (1ULL << 29) 476 #define BIC_PkgWatt (1ULL << 30) 477 #define BIC_CorWatt (1ULL << 31) 478 #define BIC_GFXWatt (1ULL << 32) 479 #define BIC_PkgCnt (1ULL << 33) 480 #define BIC_RAMWatt (1ULL << 34) 481 #define BIC_PKG__ (1ULL << 35) 482 #define BIC_RAM__ (1ULL << 36) 483 #define BIC_Pkg_J (1ULL << 37) 484 #define BIC_Cor_J (1ULL << 38) 485 #define BIC_GFX_J (1ULL << 39) 486 #define BIC_RAM_J (1ULL << 40) 487 #define BIC_Mod_c6 (1ULL << 41) 488 #define BIC_Totl_c0 (1ULL << 42) 489 #define BIC_Any_c0 (1ULL << 43) 490 #define BIC_GFX_c0 (1ULL << 44) 491 #define BIC_CPUGFX (1ULL << 45) 492 #define BIC_Core (1ULL << 46) 493 #define BIC_CPU (1ULL << 47) 494 #define BIC_APIC (1ULL << 48) 495 #define BIC_X2APIC (1ULL << 49) 496 497 #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) 498 499 unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); 500 unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; 501 502 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) 503 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 504 #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) 505 #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) 506 507 508 #define MAX_DEFERRED 16 509 char *deferred_skip_names[MAX_DEFERRED]; 510 int deferred_skip_index; 511 512 /* 513 * HIDE_LIST - hide this list of counters, show the rest [default] 514 * SHOW_LIST - show this list of counters, hide the rest 515 */ 516 enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST; 517 518 void help(void) 519 { 520 fprintf(outf, 521 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" 522 "\n" 523 "Turbostat forks the specified COMMAND and prints statistics\n" 524 "when COMMAND completes.\n" 525 "If no COMMAND is specified, turbostat wakes every 5-seconds\n" 526 "to print statistics, until interrupted.\n" 527 " -a, --add add a counter\n" 528 " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" 529 " -c, --cpu cpu-set limit output to summary plus cpu-set:\n" 530 " {core | package | j,k,l..m,n-p }\n" 531 " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n" 532 " -D, --Dump displays the raw counter values\n" 533 " -e, --enable [all | column]\n" 534 " shows all or the specified disabled column\n" 535 " -H, --hide [column|column,column,...]\n" 536 " hide the specified column(s)\n" 537 " -i, --interval sec.subsec\n" 538 " Override default 5-second measurement interval\n" 539 " -J, --Joules displays energy in Joules instead of Watts\n" 540 " -l, --list list column headers only\n" 541 " -n, --num_iterations num\n" 542 " number of the measurement iterations\n" 543 " -o, --out file\n" 544 " create or truncate \"file\" for all output\n" 545 " -q, --quiet skip decoding system configuration header\n" 546 " -s, --show [column|column,column,...]\n" 547 " show only the specified column(s)\n" 548 " -S, --Summary\n" 549 " limits output to 1-line system summary per interval\n" 550 " -T, --TCC temperature\n" 551 " sets the Thermal Control Circuit temperature in\n" 552 " degrees Celsius\n" 553 " -h, --help print this help message\n" 554 " -v, --version print version information\n" 555 "\n" 556 "For more help, run \"man turbostat\"\n"); 557 } 558 559 /* 560 * bic_lookup 561 * for all the strings in comma separate name_list, 562 * set the approprate bit in return value. 563 */ 564 unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) 565 { 566 int i; 567 unsigned long long retval = 0; 568 569 while (name_list) { 570 char *comma; 571 572 comma = strchr(name_list, ','); 573 574 if (comma) 575 *comma = '\0'; 576 577 if (!strcmp(name_list, "all")) 578 return ~0; 579 580 for (i = 0; i < MAX_BIC; ++i) { 581 if (!strcmp(name_list, bic[i].name)) { 582 retval |= (1ULL << i); 583 break; 584 } 585 } 586 if (i == MAX_BIC) { 587 if (mode == SHOW_LIST) { 588 fprintf(stderr, "Invalid counter name: %s\n", name_list); 589 exit(-1); 590 } 591 deferred_skip_names[deferred_skip_index++] = name_list; 592 if (debug) 593 fprintf(stderr, "deferred \"%s\"\n", name_list); 594 if (deferred_skip_index >= MAX_DEFERRED) { 595 fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n", 596 MAX_DEFERRED, name_list); 597 help(); 598 exit(1); 599 } 600 } 601 602 name_list = comma; 603 if (name_list) 604 name_list++; 605 606 } 607 return retval; 608 } 609 610 611 void print_header(char *delim) 612 { 613 struct msr_counter *mp; 614 int printed = 0; 615 616 if (DO_BIC(BIC_USEC)) 617 outp += sprintf(outp, "%susec", (printed++ ? delim : "")); 618 if (DO_BIC(BIC_TOD)) 619 outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); 620 if (DO_BIC(BIC_Package)) 621 outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); 622 if (DO_BIC(BIC_Node)) 623 outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); 624 if (DO_BIC(BIC_Core)) 625 outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); 626 if (DO_BIC(BIC_CPU)) 627 outp += sprintf(outp, "%sCPU", (printed++ ? delim : "")); 628 if (DO_BIC(BIC_APIC)) 629 outp += sprintf(outp, "%sAPIC", (printed++ ? delim : "")); 630 if (DO_BIC(BIC_X2APIC)) 631 outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : "")); 632 if (DO_BIC(BIC_Avg_MHz)) 633 outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); 634 if (DO_BIC(BIC_Busy)) 635 outp += sprintf(outp, "%sBusy%%", (printed++ ? delim : "")); 636 if (DO_BIC(BIC_Bzy_MHz)) 637 outp += sprintf(outp, "%sBzy_MHz", (printed++ ? delim : "")); 638 if (DO_BIC(BIC_TSC_MHz)) 639 outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : "")); 640 641 if (DO_BIC(BIC_IRQ)) { 642 if (sums_need_wide_columns) 643 outp += sprintf(outp, "%s IRQ", (printed++ ? delim : "")); 644 else 645 outp += sprintf(outp, "%sIRQ", (printed++ ? delim : "")); 646 } 647 648 if (DO_BIC(BIC_SMI)) 649 outp += sprintf(outp, "%sSMI", (printed++ ? delim : "")); 650 651 for (mp = sys.tp; mp; mp = mp->next) { 652 653 if (mp->format == FORMAT_RAW) { 654 if (mp->width == 64) 655 outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name); 656 else 657 outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), mp->name); 658 } else { 659 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) 660 outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name); 661 else 662 outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name); 663 } 664 } 665 666 if (DO_BIC(BIC_CPU_c1)) 667 outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); 668 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) 669 outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); 670 if (DO_BIC(BIC_CPU_c6)) 671 outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); 672 if (DO_BIC(BIC_CPU_c7)) 673 outp += sprintf(outp, "%sCPU%%c7", (printed++ ? delim : "")); 674 675 if (DO_BIC(BIC_Mod_c6)) 676 outp += sprintf(outp, "%sMod%%c6", (printed++ ? delim : "")); 677 678 if (DO_BIC(BIC_CoreTmp)) 679 outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); 680 681 for (mp = sys.cp; mp; mp = mp->next) { 682 if (mp->format == FORMAT_RAW) { 683 if (mp->width == 64) 684 outp += sprintf(outp, "%s%18.18s", delim, mp->name); 685 else 686 outp += sprintf(outp, "%s%10.10s", delim, mp->name); 687 } else { 688 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) 689 outp += sprintf(outp, "%s%8s", delim, mp->name); 690 else 691 outp += sprintf(outp, "%s%s", delim, mp->name); 692 } 693 } 694 695 if (DO_BIC(BIC_PkgTmp)) 696 outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : "")); 697 698 if (DO_BIC(BIC_GFX_rc6)) 699 outp += sprintf(outp, "%sGFX%%rc6", (printed++ ? delim : "")); 700 701 if (DO_BIC(BIC_GFXMHz)) 702 outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : "")); 703 704 if (DO_BIC(BIC_Totl_c0)) 705 outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : "")); 706 if (DO_BIC(BIC_Any_c0)) 707 outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : "")); 708 if (DO_BIC(BIC_GFX_c0)) 709 outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : "")); 710 if (DO_BIC(BIC_CPUGFX)) 711 outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : "")); 712 713 if (DO_BIC(BIC_Pkgpc2)) 714 outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : "")); 715 if (DO_BIC(BIC_Pkgpc3)) 716 outp += sprintf(outp, "%sPkg%%pc3", (printed++ ? delim : "")); 717 if (DO_BIC(BIC_Pkgpc6)) 718 outp += sprintf(outp, "%sPkg%%pc6", (printed++ ? delim : "")); 719 if (DO_BIC(BIC_Pkgpc7)) 720 outp += sprintf(outp, "%sPkg%%pc7", (printed++ ? delim : "")); 721 if (DO_BIC(BIC_Pkgpc8)) 722 outp += sprintf(outp, "%sPkg%%pc8", (printed++ ? delim : "")); 723 if (DO_BIC(BIC_Pkgpc9)) 724 outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : "")); 725 if (DO_BIC(BIC_Pkgpc10)) 726 outp += sprintf(outp, "%sPk%%pc10", (printed++ ? delim : "")); 727 if (DO_BIC(BIC_CPU_LPI)) 728 outp += sprintf(outp, "%sCPU%%LPI", (printed++ ? delim : "")); 729 if (DO_BIC(BIC_SYS_LPI)) 730 outp += sprintf(outp, "%sSYS%%LPI", (printed++ ? delim : "")); 731 732 if (do_rapl && !rapl_joules) { 733 if (DO_BIC(BIC_PkgWatt)) 734 outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); 735 if (DO_BIC(BIC_CorWatt)) 736 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); 737 if (DO_BIC(BIC_GFXWatt)) 738 outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); 739 if (DO_BIC(BIC_RAMWatt)) 740 outp += sprintf(outp, "%sRAMWatt", (printed++ ? delim : "")); 741 if (DO_BIC(BIC_PKG__)) 742 outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); 743 if (DO_BIC(BIC_RAM__)) 744 outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); 745 } else if (do_rapl && rapl_joules) { 746 if (DO_BIC(BIC_Pkg_J)) 747 outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); 748 if (DO_BIC(BIC_Cor_J)) 749 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); 750 if (DO_BIC(BIC_GFX_J)) 751 outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); 752 if (DO_BIC(BIC_RAM_J)) 753 outp += sprintf(outp, "%sRAM_J", (printed++ ? delim : "")); 754 if (DO_BIC(BIC_PKG__)) 755 outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); 756 if (DO_BIC(BIC_RAM__)) 757 outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); 758 } 759 for (mp = sys.pp; mp; mp = mp->next) { 760 if (mp->format == FORMAT_RAW) { 761 if (mp->width == 64) 762 outp += sprintf(outp, "%s%18.18s", delim, mp->name); 763 else 764 outp += sprintf(outp, "%s%10.10s", delim, mp->name); 765 } else { 766 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) 767 outp += sprintf(outp, "%s%8s", delim, mp->name); 768 else 769 outp += sprintf(outp, "%s%s", delim, mp->name); 770 } 771 } 772 773 outp += sprintf(outp, "\n"); 774 } 775 776 int dump_counters(struct thread_data *t, struct core_data *c, 777 struct pkg_data *p) 778 { 779 int i; 780 struct msr_counter *mp; 781 782 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); 783 784 if (t) { 785 outp += sprintf(outp, "CPU: %d flags 0x%x\n", 786 t->cpu_id, t->flags); 787 outp += sprintf(outp, "TSC: %016llX\n", t->tsc); 788 outp += sprintf(outp, "aperf: %016llX\n", t->aperf); 789 outp += sprintf(outp, "mperf: %016llX\n", t->mperf); 790 outp += sprintf(outp, "c1: %016llX\n", t->c1); 791 792 if (DO_BIC(BIC_IRQ)) 793 outp += sprintf(outp, "IRQ: %lld\n", t->irq_count); 794 if (DO_BIC(BIC_SMI)) 795 outp += sprintf(outp, "SMI: %d\n", t->smi_count); 796 797 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 798 outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", 799 i, mp->msr_num, t->counter[i]); 800 } 801 } 802 803 if (c) { 804 outp += sprintf(outp, "core: %d\n", c->core_id); 805 outp += sprintf(outp, "c3: %016llX\n", c->c3); 806 outp += sprintf(outp, "c6: %016llX\n", c->c6); 807 outp += sprintf(outp, "c7: %016llX\n", c->c7); 808 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); 809 810 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 811 outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", 812 i, mp->msr_num, c->counter[i]); 813 } 814 outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us); 815 } 816 817 if (p) { 818 outp += sprintf(outp, "package: %d\n", p->package_id); 819 820 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0); 821 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0); 822 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0); 823 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); 824 825 outp += sprintf(outp, "pc2: %016llX\n", p->pc2); 826 if (DO_BIC(BIC_Pkgpc3)) 827 outp += sprintf(outp, "pc3: %016llX\n", p->pc3); 828 if (DO_BIC(BIC_Pkgpc6)) 829 outp += sprintf(outp, "pc6: %016llX\n", p->pc6); 830 if (DO_BIC(BIC_Pkgpc7)) 831 outp += sprintf(outp, "pc7: %016llX\n", p->pc7); 832 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 833 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 834 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 835 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 836 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); 837 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); 838 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 839 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores); 840 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx); 841 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram); 842 outp += sprintf(outp, "Throttle PKG: %0X\n", 843 p->rapl_pkg_perf_status); 844 outp += sprintf(outp, "Throttle RAM: %0X\n", 845 p->rapl_dram_perf_status); 846 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); 847 848 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 849 outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n", 850 i, mp->msr_num, p->counter[i]); 851 } 852 } 853 854 outp += sprintf(outp, "\n"); 855 856 return 0; 857 } 858 859 /* 860 * column formatting convention & formats 861 */ 862 int format_counters(struct thread_data *t, struct core_data *c, 863 struct pkg_data *p) 864 { 865 double interval_float, tsc; 866 char *fmt8; 867 int i; 868 struct msr_counter *mp; 869 char *delim = "\t"; 870 int printed = 0; 871 872 /* if showing only 1st thread in core and this isn't one, bail out */ 873 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 874 return 0; 875 876 /* if showing only 1st thread in pkg and this isn't one, bail out */ 877 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 878 return 0; 879 880 /*if not summary line and --cpu is used */ 881 if ((t != &average.threads) && 882 (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) 883 return 0; 884 885 if (DO_BIC(BIC_USEC)) { 886 /* on each row, print how many usec each timestamp took to gather */ 887 struct timeval tv; 888 889 timersub(&t->tv_end, &t->tv_begin, &tv); 890 outp += sprintf(outp, "%5ld\t", tv.tv_sec * 1000000 + tv.tv_usec); 891 } 892 893 /* Time_Of_Day_Seconds: on each row, print sec.usec last timestamp taken */ 894 if (DO_BIC(BIC_TOD)) 895 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); 896 897 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 898 899 tsc = t->tsc * tsc_tweak; 900 901 /* topo columns, print blanks on 1st (average) line */ 902 if (t == &average.threads) { 903 if (DO_BIC(BIC_Package)) 904 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 905 if (DO_BIC(BIC_Node)) 906 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 907 if (DO_BIC(BIC_Core)) 908 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 909 if (DO_BIC(BIC_CPU)) 910 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 911 if (DO_BIC(BIC_APIC)) 912 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 913 if (DO_BIC(BIC_X2APIC)) 914 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 915 } else { 916 if (DO_BIC(BIC_Package)) { 917 if (p) 918 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->package_id); 919 else 920 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 921 } 922 if (DO_BIC(BIC_Node)) { 923 if (t) 924 outp += sprintf(outp, "%s%d", 925 (printed++ ? delim : ""), 926 cpus[t->cpu_id].physical_node_id); 927 else 928 outp += sprintf(outp, "%s-", 929 (printed++ ? delim : "")); 930 } 931 if (DO_BIC(BIC_Core)) { 932 if (c) 933 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id); 934 else 935 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 936 } 937 if (DO_BIC(BIC_CPU)) 938 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); 939 if (DO_BIC(BIC_APIC)) 940 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id); 941 if (DO_BIC(BIC_X2APIC)) 942 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id); 943 } 944 945 if (DO_BIC(BIC_Avg_MHz)) 946 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 947 1.0 / units * t->aperf / interval_float); 948 949 if (DO_BIC(BIC_Busy)) 950 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc); 951 952 if (DO_BIC(BIC_Bzy_MHz)) { 953 if (has_base_hz) 954 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf); 955 else 956 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 957 tsc / units * t->aperf / t->mperf / interval_float); 958 } 959 960 if (DO_BIC(BIC_TSC_MHz)) 961 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float); 962 963 /* IRQ */ 964 if (DO_BIC(BIC_IRQ)) { 965 if (sums_need_wide_columns) 966 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count); 967 else 968 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count); 969 } 970 971 /* SMI */ 972 if (DO_BIC(BIC_SMI)) 973 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count); 974 975 /* Added counters */ 976 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 977 if (mp->format == FORMAT_RAW) { 978 if (mp->width == 32) 979 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]); 980 else 981 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]); 982 } else if (mp->format == FORMAT_DELTA) { 983 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) 984 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]); 985 else 986 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]); 987 } else if (mp->format == FORMAT_PERCENT) { 988 if (mp->type == COUNTER_USEC) 989 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), t->counter[i]/interval_float/10000); 990 else 991 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i]/tsc); 992 } 993 } 994 995 /* C1 */ 996 if (DO_BIC(BIC_CPU_c1)) 997 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc); 998 999 1000 /* print per-core data only for 1st thread in core */ 1001 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1002 goto done; 1003 1004 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) 1005 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); 1006 if (DO_BIC(BIC_CPU_c6)) 1007 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); 1008 if (DO_BIC(BIC_CPU_c7)) 1009 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc); 1010 1011 /* Mod%c6 */ 1012 if (DO_BIC(BIC_Mod_c6)) 1013 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc); 1014 1015 if (DO_BIC(BIC_CoreTmp)) 1016 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c); 1017 1018 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1019 if (mp->format == FORMAT_RAW) { 1020 if (mp->width == 32) 1021 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]); 1022 else 1023 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]); 1024 } else if (mp->format == FORMAT_DELTA) { 1025 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) 1026 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]); 1027 else 1028 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]); 1029 } else if (mp->format == FORMAT_PERCENT) { 1030 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc); 1031 } 1032 } 1033 1034 /* print per-package data only for 1st core in package */ 1035 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 1036 goto done; 1037 1038 /* PkgTmp */ 1039 if (DO_BIC(BIC_PkgTmp)) 1040 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c); 1041 1042 /* GFXrc6 */ 1043 if (DO_BIC(BIC_GFX_rc6)) { 1044 if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */ 1045 outp += sprintf(outp, "%s**.**", (printed++ ? delim : "")); 1046 } else { 1047 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 1048 p->gfx_rc6_ms / 10.0 / interval_float); 1049 } 1050 } 1051 1052 /* GFXMHz */ 1053 if (DO_BIC(BIC_GFXMHz)) 1054 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz); 1055 1056 /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ 1057 if (DO_BIC(BIC_Totl_c0)) 1058 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc); 1059 if (DO_BIC(BIC_Any_c0)) 1060 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc); 1061 if (DO_BIC(BIC_GFX_c0)) 1062 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc); 1063 if (DO_BIC(BIC_CPUGFX)) 1064 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc); 1065 1066 if (DO_BIC(BIC_Pkgpc2)) 1067 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc); 1068 if (DO_BIC(BIC_Pkgpc3)) 1069 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc); 1070 if (DO_BIC(BIC_Pkgpc6)) 1071 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc); 1072 if (DO_BIC(BIC_Pkgpc7)) 1073 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc); 1074 if (DO_BIC(BIC_Pkgpc8)) 1075 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc); 1076 if (DO_BIC(BIC_Pkgpc9)) 1077 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc); 1078 if (DO_BIC(BIC_Pkgpc10)) 1079 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc); 1080 1081 if (DO_BIC(BIC_CPU_LPI)) 1082 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float); 1083 if (DO_BIC(BIC_SYS_LPI)) 1084 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); 1085 1086 /* 1087 * If measurement interval exceeds minimum RAPL Joule Counter range, 1088 * indicate that results are suspect by printing "**" in fraction place. 1089 */ 1090 if (interval_float < rapl_joule_counter_range) 1091 fmt8 = "%s%.2f"; 1092 else 1093 fmt8 = "%6.0f**"; 1094 1095 if (DO_BIC(BIC_PkgWatt)) 1096 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); 1097 if (DO_BIC(BIC_CorWatt)) 1098 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); 1099 if (DO_BIC(BIC_GFXWatt)) 1100 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); 1101 if (DO_BIC(BIC_RAMWatt)) 1102 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); 1103 if (DO_BIC(BIC_Pkg_J)) 1104 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); 1105 if (DO_BIC(BIC_Cor_J)) 1106 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); 1107 if (DO_BIC(BIC_GFX_J)) 1108 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); 1109 if (DO_BIC(BIC_RAM_J)) 1110 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units); 1111 if (DO_BIC(BIC_PKG__)) 1112 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); 1113 if (DO_BIC(BIC_RAM__)) 1114 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); 1115 1116 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 1117 if (mp->format == FORMAT_RAW) { 1118 if (mp->width == 32) 1119 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]); 1120 else 1121 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]); 1122 } else if (mp->format == FORMAT_DELTA) { 1123 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) 1124 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->counter[i]); 1125 else 1126 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]); 1127 } else if (mp->format == FORMAT_PERCENT) { 1128 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc); 1129 } 1130 } 1131 1132 done: 1133 if (*(outp - 1) != '\n') 1134 outp += sprintf(outp, "\n"); 1135 1136 return 0; 1137 } 1138 1139 void flush_output_stdout(void) 1140 { 1141 FILE *filep; 1142 1143 if (outf == stderr) 1144 filep = stdout; 1145 else 1146 filep = outf; 1147 1148 fputs(output_buffer, filep); 1149 fflush(filep); 1150 1151 outp = output_buffer; 1152 } 1153 void flush_output_stderr(void) 1154 { 1155 fputs(output_buffer, outf); 1156 fflush(outf); 1157 outp = output_buffer; 1158 } 1159 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 1160 { 1161 static int printed; 1162 1163 if (!printed || !summary_only) 1164 print_header("\t"); 1165 1166 if (topo.num_cpus > 1) 1167 format_counters(&average.threads, &average.cores, 1168 &average.packages); 1169 1170 printed = 1; 1171 1172 if (summary_only) 1173 return; 1174 1175 for_all_cpus(format_counters, t, c, p); 1176 } 1177 1178 #define DELTA_WRAP32(new, old) \ 1179 if (new > old) { \ 1180 old = new - old; \ 1181 } else { \ 1182 old = 0x100000000 + new - old; \ 1183 } 1184 1185 int 1186 delta_package(struct pkg_data *new, struct pkg_data *old) 1187 { 1188 int i; 1189 struct msr_counter *mp; 1190 1191 1192 if (DO_BIC(BIC_Totl_c0)) 1193 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0; 1194 if (DO_BIC(BIC_Any_c0)) 1195 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0; 1196 if (DO_BIC(BIC_GFX_c0)) 1197 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0; 1198 if (DO_BIC(BIC_CPUGFX)) 1199 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; 1200 1201 old->pc2 = new->pc2 - old->pc2; 1202 if (DO_BIC(BIC_Pkgpc3)) 1203 old->pc3 = new->pc3 - old->pc3; 1204 if (DO_BIC(BIC_Pkgpc6)) 1205 old->pc6 = new->pc6 - old->pc6; 1206 if (DO_BIC(BIC_Pkgpc7)) 1207 old->pc7 = new->pc7 - old->pc7; 1208 old->pc8 = new->pc8 - old->pc8; 1209 old->pc9 = new->pc9 - old->pc9; 1210 old->pc10 = new->pc10 - old->pc10; 1211 old->cpu_lpi = new->cpu_lpi - old->cpu_lpi; 1212 old->sys_lpi = new->sys_lpi - old->sys_lpi; 1213 old->pkg_temp_c = new->pkg_temp_c; 1214 1215 /* flag an error when rc6 counter resets/wraps */ 1216 if (old->gfx_rc6_ms > new->gfx_rc6_ms) 1217 old->gfx_rc6_ms = -1; 1218 else 1219 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; 1220 1221 old->gfx_mhz = new->gfx_mhz; 1222 1223 DELTA_WRAP32(new->energy_pkg, old->energy_pkg); 1224 DELTA_WRAP32(new->energy_cores, old->energy_cores); 1225 DELTA_WRAP32(new->energy_gfx, old->energy_gfx); 1226 DELTA_WRAP32(new->energy_dram, old->energy_dram); 1227 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status); 1228 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status); 1229 1230 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 1231 if (mp->format == FORMAT_RAW) 1232 old->counter[i] = new->counter[i]; 1233 else 1234 old->counter[i] = new->counter[i] - old->counter[i]; 1235 } 1236 1237 return 0; 1238 } 1239 1240 void 1241 delta_core(struct core_data *new, struct core_data *old) 1242 { 1243 int i; 1244 struct msr_counter *mp; 1245 1246 old->c3 = new->c3 - old->c3; 1247 old->c6 = new->c6 - old->c6; 1248 old->c7 = new->c7 - old->c7; 1249 old->core_temp_c = new->core_temp_c; 1250 old->mc6_us = new->mc6_us - old->mc6_us; 1251 1252 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1253 if (mp->format == FORMAT_RAW) 1254 old->counter[i] = new->counter[i]; 1255 else 1256 old->counter[i] = new->counter[i] - old->counter[i]; 1257 } 1258 } 1259 1260 /* 1261 * old = new - old 1262 */ 1263 int 1264 delta_thread(struct thread_data *new, struct thread_data *old, 1265 struct core_data *core_delta) 1266 { 1267 int i; 1268 struct msr_counter *mp; 1269 1270 /* we run cpuid just the 1st time, copy the results */ 1271 if (DO_BIC(BIC_APIC)) 1272 new->apic_id = old->apic_id; 1273 if (DO_BIC(BIC_X2APIC)) 1274 new->x2apic_id = old->x2apic_id; 1275 1276 /* 1277 * the timestamps from start of measurement interval are in "old" 1278 * the timestamp from end of measurement interval are in "new" 1279 * over-write old w/ new so we can print end of interval values 1280 */ 1281 1282 old->tv_begin = new->tv_begin; 1283 old->tv_end = new->tv_end; 1284 1285 old->tsc = new->tsc - old->tsc; 1286 1287 /* check for TSC < 1 Mcycles over interval */ 1288 if (old->tsc < (1000 * 1000)) 1289 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n" 1290 "You can disable all c-states by booting with \"idle=poll\"\n" 1291 "or just the deep ones with \"processor.max_cstate=1\""); 1292 1293 old->c1 = new->c1 - old->c1; 1294 1295 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1296 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 1297 old->aperf = new->aperf - old->aperf; 1298 old->mperf = new->mperf - old->mperf; 1299 } else { 1300 return -1; 1301 } 1302 } 1303 1304 1305 if (use_c1_residency_msr) { 1306 /* 1307 * Some models have a dedicated C1 residency MSR, 1308 * which should be more accurate than the derivation below. 1309 */ 1310 } else { 1311 /* 1312 * As counter collection is not atomic, 1313 * it is possible for mperf's non-halted cycles + idle states 1314 * to exceed TSC's all cycles: show c1 = 0% in that case. 1315 */ 1316 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak)) 1317 old->c1 = 0; 1318 else { 1319 /* normal case, derive c1 */ 1320 old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3 1321 - core_delta->c6 - core_delta->c7; 1322 } 1323 } 1324 1325 if (old->mperf == 0) { 1326 if (debug > 1) 1327 fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id); 1328 old->mperf = 1; /* divide by 0 protection */ 1329 } 1330 1331 if (DO_BIC(BIC_IRQ)) 1332 old->irq_count = new->irq_count - old->irq_count; 1333 1334 if (DO_BIC(BIC_SMI)) 1335 old->smi_count = new->smi_count - old->smi_count; 1336 1337 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 1338 if (mp->format == FORMAT_RAW) 1339 old->counter[i] = new->counter[i]; 1340 else 1341 old->counter[i] = new->counter[i] - old->counter[i]; 1342 } 1343 return 0; 1344 } 1345 1346 int delta_cpu(struct thread_data *t, struct core_data *c, 1347 struct pkg_data *p, struct thread_data *t2, 1348 struct core_data *c2, struct pkg_data *p2) 1349 { 1350 int retval = 0; 1351 1352 /* calculate core delta only for 1st thread in core */ 1353 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) 1354 delta_core(c, c2); 1355 1356 /* always calculate thread delta */ 1357 retval = delta_thread(t, t2, c2); /* c2 is core delta */ 1358 if (retval) 1359 return retval; 1360 1361 /* calculate package delta only for 1st core in package */ 1362 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) 1363 retval = delta_package(p, p2); 1364 1365 return retval; 1366 } 1367 1368 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 1369 { 1370 int i; 1371 struct msr_counter *mp; 1372 1373 t->tv_begin.tv_sec = 0; 1374 t->tv_begin.tv_usec = 0; 1375 t->tv_end.tv_sec = 0; 1376 t->tv_end.tv_usec = 0; 1377 1378 t->tsc = 0; 1379 t->aperf = 0; 1380 t->mperf = 0; 1381 t->c1 = 0; 1382 1383 t->irq_count = 0; 1384 t->smi_count = 0; 1385 1386 /* tells format_counters to dump all fields from this set */ 1387 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE; 1388 1389 c->c3 = 0; 1390 c->c6 = 0; 1391 c->c7 = 0; 1392 c->mc6_us = 0; 1393 c->core_temp_c = 0; 1394 1395 p->pkg_wtd_core_c0 = 0; 1396 p->pkg_any_core_c0 = 0; 1397 p->pkg_any_gfxe_c0 = 0; 1398 p->pkg_both_core_gfxe_c0 = 0; 1399 1400 p->pc2 = 0; 1401 if (DO_BIC(BIC_Pkgpc3)) 1402 p->pc3 = 0; 1403 if (DO_BIC(BIC_Pkgpc6)) 1404 p->pc6 = 0; 1405 if (DO_BIC(BIC_Pkgpc7)) 1406 p->pc7 = 0; 1407 p->pc8 = 0; 1408 p->pc9 = 0; 1409 p->pc10 = 0; 1410 p->cpu_lpi = 0; 1411 p->sys_lpi = 0; 1412 1413 p->energy_pkg = 0; 1414 p->energy_dram = 0; 1415 p->energy_cores = 0; 1416 p->energy_gfx = 0; 1417 p->rapl_pkg_perf_status = 0; 1418 p->rapl_dram_perf_status = 0; 1419 p->pkg_temp_c = 0; 1420 1421 p->gfx_rc6_ms = 0; 1422 p->gfx_mhz = 0; 1423 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) 1424 t->counter[i] = 0; 1425 1426 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) 1427 c->counter[i] = 0; 1428 1429 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) 1430 p->counter[i] = 0; 1431 } 1432 int sum_counters(struct thread_data *t, struct core_data *c, 1433 struct pkg_data *p) 1434 { 1435 int i; 1436 struct msr_counter *mp; 1437 1438 /* copy un-changing apic_id's */ 1439 if (DO_BIC(BIC_APIC)) 1440 average.threads.apic_id = t->apic_id; 1441 if (DO_BIC(BIC_X2APIC)) 1442 average.threads.x2apic_id = t->x2apic_id; 1443 1444 /* remember first tv_begin */ 1445 if (average.threads.tv_begin.tv_sec == 0) 1446 average.threads.tv_begin = t->tv_begin; 1447 1448 /* remember last tv_end */ 1449 average.threads.tv_end = t->tv_end; 1450 1451 average.threads.tsc += t->tsc; 1452 average.threads.aperf += t->aperf; 1453 average.threads.mperf += t->mperf; 1454 average.threads.c1 += t->c1; 1455 1456 average.threads.irq_count += t->irq_count; 1457 average.threads.smi_count += t->smi_count; 1458 1459 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 1460 if (mp->format == FORMAT_RAW) 1461 continue; 1462 average.threads.counter[i] += t->counter[i]; 1463 } 1464 1465 /* sum per-core values only for 1st thread in core */ 1466 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1467 return 0; 1468 1469 average.cores.c3 += c->c3; 1470 average.cores.c6 += c->c6; 1471 average.cores.c7 += c->c7; 1472 average.cores.mc6_us += c->mc6_us; 1473 1474 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); 1475 1476 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1477 if (mp->format == FORMAT_RAW) 1478 continue; 1479 average.cores.counter[i] += c->counter[i]; 1480 } 1481 1482 /* sum per-pkg values only for 1st core in pkg */ 1483 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 1484 return 0; 1485 1486 if (DO_BIC(BIC_Totl_c0)) 1487 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0; 1488 if (DO_BIC(BIC_Any_c0)) 1489 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0; 1490 if (DO_BIC(BIC_GFX_c0)) 1491 average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0; 1492 if (DO_BIC(BIC_CPUGFX)) 1493 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0; 1494 1495 average.packages.pc2 += p->pc2; 1496 if (DO_BIC(BIC_Pkgpc3)) 1497 average.packages.pc3 += p->pc3; 1498 if (DO_BIC(BIC_Pkgpc6)) 1499 average.packages.pc6 += p->pc6; 1500 if (DO_BIC(BIC_Pkgpc7)) 1501 average.packages.pc7 += p->pc7; 1502 average.packages.pc8 += p->pc8; 1503 average.packages.pc9 += p->pc9; 1504 average.packages.pc10 += p->pc10; 1505 1506 average.packages.cpu_lpi = p->cpu_lpi; 1507 average.packages.sys_lpi = p->sys_lpi; 1508 1509 average.packages.energy_pkg += p->energy_pkg; 1510 average.packages.energy_dram += p->energy_dram; 1511 average.packages.energy_cores += p->energy_cores; 1512 average.packages.energy_gfx += p->energy_gfx; 1513 1514 average.packages.gfx_rc6_ms = p->gfx_rc6_ms; 1515 average.packages.gfx_mhz = p->gfx_mhz; 1516 1517 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c); 1518 1519 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status; 1520 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; 1521 1522 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 1523 if (mp->format == FORMAT_RAW) 1524 continue; 1525 average.packages.counter[i] += p->counter[i]; 1526 } 1527 return 0; 1528 } 1529 /* 1530 * sum the counters for all cpus in the system 1531 * compute the weighted average 1532 */ 1533 void compute_average(struct thread_data *t, struct core_data *c, 1534 struct pkg_data *p) 1535 { 1536 int i; 1537 struct msr_counter *mp; 1538 1539 clear_counters(&average.threads, &average.cores, &average.packages); 1540 1541 for_all_cpus(sum_counters, t, c, p); 1542 1543 average.threads.tsc /= topo.num_cpus; 1544 average.threads.aperf /= topo.num_cpus; 1545 average.threads.mperf /= topo.num_cpus; 1546 average.threads.c1 /= topo.num_cpus; 1547 1548 if (average.threads.irq_count > 9999999) 1549 sums_need_wide_columns = 1; 1550 1551 average.cores.c3 /= topo.num_cores; 1552 average.cores.c6 /= topo.num_cores; 1553 average.cores.c7 /= topo.num_cores; 1554 average.cores.mc6_us /= topo.num_cores; 1555 1556 if (DO_BIC(BIC_Totl_c0)) 1557 average.packages.pkg_wtd_core_c0 /= topo.num_packages; 1558 if (DO_BIC(BIC_Any_c0)) 1559 average.packages.pkg_any_core_c0 /= topo.num_packages; 1560 if (DO_BIC(BIC_GFX_c0)) 1561 average.packages.pkg_any_gfxe_c0 /= topo.num_packages; 1562 if (DO_BIC(BIC_CPUGFX)) 1563 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages; 1564 1565 average.packages.pc2 /= topo.num_packages; 1566 if (DO_BIC(BIC_Pkgpc3)) 1567 average.packages.pc3 /= topo.num_packages; 1568 if (DO_BIC(BIC_Pkgpc6)) 1569 average.packages.pc6 /= topo.num_packages; 1570 if (DO_BIC(BIC_Pkgpc7)) 1571 average.packages.pc7 /= topo.num_packages; 1572 1573 average.packages.pc8 /= topo.num_packages; 1574 average.packages.pc9 /= topo.num_packages; 1575 average.packages.pc10 /= topo.num_packages; 1576 1577 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 1578 if (mp->format == FORMAT_RAW) 1579 continue; 1580 if (mp->type == COUNTER_ITEMS) { 1581 if (average.threads.counter[i] > 9999999) 1582 sums_need_wide_columns = 1; 1583 continue; 1584 } 1585 average.threads.counter[i] /= topo.num_cpus; 1586 } 1587 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1588 if (mp->format == FORMAT_RAW) 1589 continue; 1590 if (mp->type == COUNTER_ITEMS) { 1591 if (average.cores.counter[i] > 9999999) 1592 sums_need_wide_columns = 1; 1593 } 1594 average.cores.counter[i] /= topo.num_cores; 1595 } 1596 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 1597 if (mp->format == FORMAT_RAW) 1598 continue; 1599 if (mp->type == COUNTER_ITEMS) { 1600 if (average.packages.counter[i] > 9999999) 1601 sums_need_wide_columns = 1; 1602 } 1603 average.packages.counter[i] /= topo.num_packages; 1604 } 1605 } 1606 1607 static unsigned long long rdtsc(void) 1608 { 1609 unsigned int low, high; 1610 1611 asm volatile("rdtsc" : "=a" (low), "=d" (high)); 1612 1613 return low | ((unsigned long long)high) << 32; 1614 } 1615 1616 /* 1617 * Open a file, and exit on failure 1618 */ 1619 FILE *fopen_or_die(const char *path, const char *mode) 1620 { 1621 FILE *filep = fopen(path, mode); 1622 1623 if (!filep) 1624 err(1, "%s: open failed", path); 1625 return filep; 1626 } 1627 /* 1628 * snapshot_sysfs_counter() 1629 * 1630 * return snapshot of given counter 1631 */ 1632 unsigned long long snapshot_sysfs_counter(char *path) 1633 { 1634 FILE *fp; 1635 int retval; 1636 unsigned long long counter; 1637 1638 fp = fopen_or_die(path, "r"); 1639 1640 retval = fscanf(fp, "%lld", &counter); 1641 if (retval != 1) 1642 err(1, "snapshot_sysfs_counter(%s)", path); 1643 1644 fclose(fp); 1645 1646 return counter; 1647 } 1648 1649 int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) 1650 { 1651 if (mp->msr_num != 0) { 1652 if (get_msr(cpu, mp->msr_num, counterp)) 1653 return -1; 1654 } else { 1655 char path[128 + PATH_BYTES]; 1656 1657 if (mp->flags & SYSFS_PERCPU) { 1658 sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", 1659 cpu, mp->path); 1660 1661 *counterp = snapshot_sysfs_counter(path); 1662 } else { 1663 *counterp = snapshot_sysfs_counter(mp->path); 1664 } 1665 } 1666 1667 return 0; 1668 } 1669 1670 void get_apic_id(struct thread_data *t) 1671 { 1672 unsigned int eax, ebx, ecx, edx, max_level; 1673 1674 eax = ebx = ecx = edx = 0; 1675 1676 if (!genuine_intel) 1677 return; 1678 1679 __cpuid(0, max_level, ebx, ecx, edx); 1680 1681 __cpuid(1, eax, ebx, ecx, edx); 1682 t->apic_id = (ebx >> 24) & 0xf; 1683 1684 if (max_level < 0xb) 1685 return; 1686 1687 if (!DO_BIC(BIC_X2APIC)) 1688 return; 1689 1690 ecx = 0; 1691 __cpuid(0xb, eax, ebx, ecx, edx); 1692 t->x2apic_id = edx; 1693 1694 if (debug && (t->apic_id != t->x2apic_id)) 1695 fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); 1696 } 1697 1698 /* 1699 * get_counters(...) 1700 * migrate to cpu 1701 * acquire and record local counters for that cpu 1702 */ 1703 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 1704 { 1705 int cpu = t->cpu_id; 1706 unsigned long long msr; 1707 int aperf_mperf_retry_count = 0; 1708 struct msr_counter *mp; 1709 int i; 1710 1711 gettimeofday(&t->tv_begin, (struct timezone *)NULL); 1712 1713 if (cpu_migrate(cpu)) { 1714 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1715 return -1; 1716 } 1717 1718 if (first_counter_read) 1719 get_apic_id(t); 1720 retry: 1721 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1722 1723 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1724 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1725 1726 /* 1727 * The TSC, APERF and MPERF must be read together for 1728 * APERF/MPERF and MPERF/TSC to give accurate results. 1729 * 1730 * Unfortunately, APERF and MPERF are read by 1731 * individual system call, so delays may occur 1732 * between them. If the time to read them 1733 * varies by a large amount, we re-read them. 1734 */ 1735 1736 /* 1737 * This initial dummy APERF read has been seen to 1738 * reduce jitter in the subsequent reads. 1739 */ 1740 1741 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) 1742 return -3; 1743 1744 t->tsc = rdtsc(); /* re-read close to APERF */ 1745 1746 tsc_before = t->tsc; 1747 1748 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) 1749 return -3; 1750 1751 tsc_between = rdtsc(); 1752 1753 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) 1754 return -4; 1755 1756 tsc_after = rdtsc(); 1757 1758 aperf_time = tsc_between - tsc_before; 1759 mperf_time = tsc_after - tsc_between; 1760 1761 /* 1762 * If the system call latency to read APERF and MPERF 1763 * differ by more than 2x, then try again. 1764 */ 1765 if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) { 1766 aperf_mperf_retry_count++; 1767 if (aperf_mperf_retry_count < 5) 1768 goto retry; 1769 else 1770 warnx("cpu%d jitter %lld %lld", 1771 cpu, aperf_time, mperf_time); 1772 } 1773 aperf_mperf_retry_count = 0; 1774 1775 t->aperf = t->aperf * aperf_mperf_multiplier; 1776 t->mperf = t->mperf * aperf_mperf_multiplier; 1777 } 1778 1779 if (DO_BIC(BIC_IRQ)) 1780 t->irq_count = irqs_per_cpu[cpu]; 1781 if (DO_BIC(BIC_SMI)) { 1782 if (get_msr(cpu, MSR_SMI_COUNT, &msr)) 1783 return -5; 1784 t->smi_count = msr & 0xFFFFFFFF; 1785 } 1786 if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) { 1787 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) 1788 return -6; 1789 } 1790 1791 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 1792 if (get_mp(cpu, mp, &t->counter[i])) 1793 return -10; 1794 } 1795 1796 /* collect core counters only for 1st thread in core */ 1797 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1798 goto done; 1799 1800 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) { 1801 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1802 return -6; 1803 } 1804 1805 if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { 1806 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1807 return -7; 1808 } else if (do_knl_cstates) { 1809 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1810 return -7; 1811 } 1812 1813 if (DO_BIC(BIC_CPU_c7)) 1814 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1815 return -8; 1816 1817 if (DO_BIC(BIC_Mod_c6)) 1818 if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us)) 1819 return -8; 1820 1821 if (DO_BIC(BIC_CoreTmp)) { 1822 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 1823 return -9; 1824 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1825 } 1826 1827 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1828 if (get_mp(cpu, mp, &c->counter[i])) 1829 return -10; 1830 } 1831 1832 /* collect package counters only for 1st core in package */ 1833 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 1834 goto done; 1835 1836 if (DO_BIC(BIC_Totl_c0)) { 1837 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0)) 1838 return -10; 1839 } 1840 if (DO_BIC(BIC_Any_c0)) { 1841 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0)) 1842 return -11; 1843 } 1844 if (DO_BIC(BIC_GFX_c0)) { 1845 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0)) 1846 return -12; 1847 } 1848 if (DO_BIC(BIC_CPUGFX)) { 1849 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) 1850 return -13; 1851 } 1852 if (DO_BIC(BIC_Pkgpc3)) 1853 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) 1854 return -9; 1855 if (DO_BIC(BIC_Pkgpc6)) { 1856 if (do_slm_cstates) { 1857 if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6)) 1858 return -10; 1859 } else { 1860 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) 1861 return -10; 1862 } 1863 } 1864 1865 if (DO_BIC(BIC_Pkgpc2)) 1866 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) 1867 return -11; 1868 if (DO_BIC(BIC_Pkgpc7)) 1869 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) 1870 return -12; 1871 if (DO_BIC(BIC_Pkgpc8)) 1872 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) 1873 return -13; 1874 if (DO_BIC(BIC_Pkgpc9)) 1875 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) 1876 return -13; 1877 if (DO_BIC(BIC_Pkgpc10)) 1878 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) 1879 return -13; 1880 1881 if (DO_BIC(BIC_CPU_LPI)) 1882 p->cpu_lpi = cpuidle_cur_cpu_lpi_us; 1883 if (DO_BIC(BIC_SYS_LPI)) 1884 p->sys_lpi = cpuidle_cur_sys_lpi_us; 1885 1886 if (do_rapl & RAPL_PKG) { 1887 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) 1888 return -13; 1889 p->energy_pkg = msr & 0xFFFFFFFF; 1890 } 1891 if (do_rapl & RAPL_CORES_ENERGY_STATUS) { 1892 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr)) 1893 return -14; 1894 p->energy_cores = msr & 0xFFFFFFFF; 1895 } 1896 if (do_rapl & RAPL_DRAM) { 1897 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) 1898 return -15; 1899 p->energy_dram = msr & 0xFFFFFFFF; 1900 } 1901 if (do_rapl & RAPL_GFX) { 1902 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr)) 1903 return -16; 1904 p->energy_gfx = msr & 0xFFFFFFFF; 1905 } 1906 if (do_rapl & RAPL_PKG_PERF_STATUS) { 1907 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr)) 1908 return -16; 1909 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF; 1910 } 1911 if (do_rapl & RAPL_DRAM_PERF_STATUS) { 1912 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr)) 1913 return -16; 1914 p->rapl_dram_perf_status = msr & 0xFFFFFFFF; 1915 } 1916 if (DO_BIC(BIC_PkgTmp)) { 1917 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 1918 return -17; 1919 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1920 } 1921 1922 if (DO_BIC(BIC_GFX_rc6)) 1923 p->gfx_rc6_ms = gfx_cur_rc6_ms; 1924 1925 if (DO_BIC(BIC_GFXMHz)) 1926 p->gfx_mhz = gfx_cur_mhz; 1927 1928 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 1929 if (get_mp(cpu, mp, &p->counter[i])) 1930 return -10; 1931 } 1932 done: 1933 gettimeofday(&t->tv_end, (struct timezone *)NULL); 1934 1935 return 0; 1936 } 1937 1938 /* 1939 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit: 1940 * If you change the values, note they are used both in comparisons 1941 * (>= PCL__7) and to index pkg_cstate_limit_strings[]. 1942 */ 1943 1944 #define PCLUKN 0 /* Unknown */ 1945 #define PCLRSV 1 /* Reserved */ 1946 #define PCL__0 2 /* PC0 */ 1947 #define PCL__1 3 /* PC1 */ 1948 #define PCL__2 4 /* PC2 */ 1949 #define PCL__3 5 /* PC3 */ 1950 #define PCL__4 6 /* PC4 */ 1951 #define PCL__6 7 /* PC6 */ 1952 #define PCL_6N 8 /* PC6 No Retention */ 1953 #define PCL_6R 9 /* PC6 Retention */ 1954 #define PCL__7 10 /* PC7 */ 1955 #define PCL_7S 11 /* PC7 Shrink */ 1956 #define PCL__8 12 /* PC8 */ 1957 #define PCL__9 13 /* PC9 */ 1958 #define PCLUNL 14 /* Unlimited */ 1959 1960 int pkg_cstate_limit = PCLUKN; 1961 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", 1962 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"}; 1963 1964 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1965 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1966 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1967 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7}; 1968 int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1969 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1970 int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1971 int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1972 1973 1974 static void 1975 calculate_tsc_tweak() 1976 { 1977 tsc_tweak = base_hz / tsc_hz; 1978 } 1979 1980 static void 1981 dump_nhm_platform_info(void) 1982 { 1983 unsigned long long msr; 1984 unsigned int ratio; 1985 1986 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 1987 1988 fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); 1989 1990 ratio = (msr >> 40) & 0xFF; 1991 fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n", 1992 ratio, bclk, ratio * bclk); 1993 1994 ratio = (msr >> 8) & 0xFF; 1995 fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", 1996 ratio, bclk, ratio * bclk); 1997 1998 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); 1999 fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", 2000 base_cpu, msr, msr & 0x2 ? "EN" : "DIS"); 2001 2002 return; 2003 } 2004 2005 static void 2006 dump_hsw_turbo_ratio_limits(void) 2007 { 2008 unsigned long long msr; 2009 unsigned int ratio; 2010 2011 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); 2012 2013 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr); 2014 2015 ratio = (msr >> 8) & 0xFF; 2016 if (ratio) 2017 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n", 2018 ratio, bclk, ratio * bclk); 2019 2020 ratio = (msr >> 0) & 0xFF; 2021 if (ratio) 2022 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n", 2023 ratio, bclk, ratio * bclk); 2024 return; 2025 } 2026 2027 static void 2028 dump_ivt_turbo_ratio_limits(void) 2029 { 2030 unsigned long long msr; 2031 unsigned int ratio; 2032 2033 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); 2034 2035 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr); 2036 2037 ratio = (msr >> 56) & 0xFF; 2038 if (ratio) 2039 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n", 2040 ratio, bclk, ratio * bclk); 2041 2042 ratio = (msr >> 48) & 0xFF; 2043 if (ratio) 2044 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n", 2045 ratio, bclk, ratio * bclk); 2046 2047 ratio = (msr >> 40) & 0xFF; 2048 if (ratio) 2049 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n", 2050 ratio, bclk, ratio * bclk); 2051 2052 ratio = (msr >> 32) & 0xFF; 2053 if (ratio) 2054 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n", 2055 ratio, bclk, ratio * bclk); 2056 2057 ratio = (msr >> 24) & 0xFF; 2058 if (ratio) 2059 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n", 2060 ratio, bclk, ratio * bclk); 2061 2062 ratio = (msr >> 16) & 0xFF; 2063 if (ratio) 2064 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n", 2065 ratio, bclk, ratio * bclk); 2066 2067 ratio = (msr >> 8) & 0xFF; 2068 if (ratio) 2069 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n", 2070 ratio, bclk, ratio * bclk); 2071 2072 ratio = (msr >> 0) & 0xFF; 2073 if (ratio) 2074 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n", 2075 ratio, bclk, ratio * bclk); 2076 return; 2077 } 2078 int has_turbo_ratio_group_limits(int family, int model) 2079 { 2080 2081 if (!genuine_intel) 2082 return 0; 2083 2084 switch (model) { 2085 case INTEL_FAM6_ATOM_GOLDMONT: 2086 case INTEL_FAM6_SKYLAKE_X: 2087 case INTEL_FAM6_ATOM_DENVERTON: 2088 return 1; 2089 } 2090 return 0; 2091 } 2092 2093 static void 2094 dump_turbo_ratio_limits(int family, int model) 2095 { 2096 unsigned long long msr, core_counts; 2097 unsigned int ratio, group_size; 2098 2099 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); 2100 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); 2101 2102 if (has_turbo_ratio_group_limits(family, model)) { 2103 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts); 2104 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts); 2105 } else { 2106 core_counts = 0x0807060504030201; 2107 } 2108 2109 ratio = (msr >> 56) & 0xFF; 2110 group_size = (core_counts >> 56) & 0xFF; 2111 if (ratio) 2112 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2113 ratio, bclk, ratio * bclk, group_size); 2114 2115 ratio = (msr >> 48) & 0xFF; 2116 group_size = (core_counts >> 48) & 0xFF; 2117 if (ratio) 2118 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2119 ratio, bclk, ratio * bclk, group_size); 2120 2121 ratio = (msr >> 40) & 0xFF; 2122 group_size = (core_counts >> 40) & 0xFF; 2123 if (ratio) 2124 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2125 ratio, bclk, ratio * bclk, group_size); 2126 2127 ratio = (msr >> 32) & 0xFF; 2128 group_size = (core_counts >> 32) & 0xFF; 2129 if (ratio) 2130 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2131 ratio, bclk, ratio * bclk, group_size); 2132 2133 ratio = (msr >> 24) & 0xFF; 2134 group_size = (core_counts >> 24) & 0xFF; 2135 if (ratio) 2136 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2137 ratio, bclk, ratio * bclk, group_size); 2138 2139 ratio = (msr >> 16) & 0xFF; 2140 group_size = (core_counts >> 16) & 0xFF; 2141 if (ratio) 2142 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2143 ratio, bclk, ratio * bclk, group_size); 2144 2145 ratio = (msr >> 8) & 0xFF; 2146 group_size = (core_counts >> 8) & 0xFF; 2147 if (ratio) 2148 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2149 ratio, bclk, ratio * bclk, group_size); 2150 2151 ratio = (msr >> 0) & 0xFF; 2152 group_size = (core_counts >> 0) & 0xFF; 2153 if (ratio) 2154 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2155 ratio, bclk, ratio * bclk, group_size); 2156 return; 2157 } 2158 2159 static void 2160 dump_atom_turbo_ratio_limits(void) 2161 { 2162 unsigned long long msr; 2163 unsigned int ratio; 2164 2165 get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr); 2166 fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); 2167 2168 ratio = (msr >> 0) & 0x3F; 2169 if (ratio) 2170 fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n", 2171 ratio, bclk, ratio * bclk); 2172 2173 ratio = (msr >> 8) & 0x3F; 2174 if (ratio) 2175 fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n", 2176 ratio, bclk, ratio * bclk); 2177 2178 ratio = (msr >> 16) & 0x3F; 2179 if (ratio) 2180 fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", 2181 ratio, bclk, ratio * bclk); 2182 2183 get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr); 2184 fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); 2185 2186 ratio = (msr >> 24) & 0x3F; 2187 if (ratio) 2188 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n", 2189 ratio, bclk, ratio * bclk); 2190 2191 ratio = (msr >> 16) & 0x3F; 2192 if (ratio) 2193 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n", 2194 ratio, bclk, ratio * bclk); 2195 2196 ratio = (msr >> 8) & 0x3F; 2197 if (ratio) 2198 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n", 2199 ratio, bclk, ratio * bclk); 2200 2201 ratio = (msr >> 0) & 0x3F; 2202 if (ratio) 2203 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n", 2204 ratio, bclk, ratio * bclk); 2205 } 2206 2207 static void 2208 dump_knl_turbo_ratio_limits(void) 2209 { 2210 const unsigned int buckets_no = 7; 2211 2212 unsigned long long msr; 2213 int delta_cores, delta_ratio; 2214 int i, b_nr; 2215 unsigned int cores[buckets_no]; 2216 unsigned int ratio[buckets_no]; 2217 2218 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); 2219 2220 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", 2221 base_cpu, msr); 2222 2223 /** 2224 * Turbo encoding in KNL is as follows: 2225 * [0] -- Reserved 2226 * [7:1] -- Base value of number of active cores of bucket 1. 2227 * [15:8] -- Base value of freq ratio of bucket 1. 2228 * [20:16] -- +ve delta of number of active cores of bucket 2. 2229 * i.e. active cores of bucket 2 = 2230 * active cores of bucket 1 + delta 2231 * [23:21] -- Negative delta of freq ratio of bucket 2. 2232 * i.e. freq ratio of bucket 2 = 2233 * freq ratio of bucket 1 - delta 2234 * [28:24]-- +ve delta of number of active cores of bucket 3. 2235 * [31:29]-- -ve delta of freq ratio of bucket 3. 2236 * [36:32]-- +ve delta of number of active cores of bucket 4. 2237 * [39:37]-- -ve delta of freq ratio of bucket 4. 2238 * [44:40]-- +ve delta of number of active cores of bucket 5. 2239 * [47:45]-- -ve delta of freq ratio of bucket 5. 2240 * [52:48]-- +ve delta of number of active cores of bucket 6. 2241 * [55:53]-- -ve delta of freq ratio of bucket 6. 2242 * [60:56]-- +ve delta of number of active cores of bucket 7. 2243 * [63:61]-- -ve delta of freq ratio of bucket 7. 2244 */ 2245 2246 b_nr = 0; 2247 cores[b_nr] = (msr & 0xFF) >> 1; 2248 ratio[b_nr] = (msr >> 8) & 0xFF; 2249 2250 for (i = 16; i < 64; i += 8) { 2251 delta_cores = (msr >> i) & 0x1F; 2252 delta_ratio = (msr >> (i + 5)) & 0x7; 2253 2254 cores[b_nr + 1] = cores[b_nr] + delta_cores; 2255 ratio[b_nr + 1] = ratio[b_nr] - delta_ratio; 2256 b_nr++; 2257 } 2258 2259 for (i = buckets_no - 1; i >= 0; i--) 2260 if (i > 0 ? ratio[i] != ratio[i - 1] : 1) 2261 fprintf(outf, 2262 "%d * %.1f = %.1f MHz max turbo %d active cores\n", 2263 ratio[i], bclk, ratio[i] * bclk, cores[i]); 2264 } 2265 2266 static void 2267 dump_nhm_cst_cfg(void) 2268 { 2269 unsigned long long msr; 2270 2271 get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); 2272 2273 fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr); 2274 2275 fprintf(outf, " (%s%s%s%s%slocked, pkg-cstate-limit=%d (%s)", 2276 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", 2277 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", 2278 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", 2279 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "", 2280 (msr & (1 << 15)) ? "" : "UN", 2281 (unsigned int)msr & 0xF, 2282 pkg_cstate_limit_strings[pkg_cstate_limit]); 2283 2284 #define AUTOMATIC_CSTATE_CONVERSION (1UL << 16) 2285 if (has_automatic_cstate_conversion) { 2286 fprintf(outf, ", automatic c-state conversion=%s", 2287 (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off"); 2288 } 2289 2290 fprintf(outf, ")\n"); 2291 2292 return; 2293 } 2294 2295 static void 2296 dump_config_tdp(void) 2297 { 2298 unsigned long long msr; 2299 2300 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr); 2301 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr); 2302 fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF); 2303 2304 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr); 2305 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr); 2306 if (msr) { 2307 fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF); 2308 fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF); 2309 fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF); 2310 fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF); 2311 } 2312 fprintf(outf, ")\n"); 2313 2314 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr); 2315 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr); 2316 if (msr) { 2317 fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF); 2318 fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF); 2319 fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF); 2320 fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF); 2321 } 2322 fprintf(outf, ")\n"); 2323 2324 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr); 2325 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr); 2326 if ((msr) & 0x3) 2327 fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3); 2328 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 2329 fprintf(outf, ")\n"); 2330 2331 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr); 2332 fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr); 2333 fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF); 2334 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 2335 fprintf(outf, ")\n"); 2336 } 2337 2338 unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; 2339 2340 void print_irtl(void) 2341 { 2342 unsigned long long msr; 2343 2344 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); 2345 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); 2346 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 2347 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 2348 2349 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr); 2350 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr); 2351 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 2352 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 2353 2354 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr); 2355 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr); 2356 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 2357 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 2358 2359 if (!do_irtl_hsw) 2360 return; 2361 2362 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr); 2363 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr); 2364 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 2365 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 2366 2367 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr); 2368 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr); 2369 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 2370 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 2371 2372 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr); 2373 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr); 2374 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", 2375 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); 2376 2377 } 2378 void free_fd_percpu(void) 2379 { 2380 int i; 2381 2382 for (i = 0; i < topo.max_cpu_num + 1; ++i) { 2383 if (fd_percpu[i] != 0) 2384 close(fd_percpu[i]); 2385 } 2386 2387 free(fd_percpu); 2388 } 2389 2390 void free_all_buffers(void) 2391 { 2392 int i; 2393 2394 CPU_FREE(cpu_present_set); 2395 cpu_present_set = NULL; 2396 cpu_present_setsize = 0; 2397 2398 CPU_FREE(cpu_affinity_set); 2399 cpu_affinity_set = NULL; 2400 cpu_affinity_setsize = 0; 2401 2402 free(thread_even); 2403 free(core_even); 2404 free(package_even); 2405 2406 thread_even = NULL; 2407 core_even = NULL; 2408 package_even = NULL; 2409 2410 free(thread_odd); 2411 free(core_odd); 2412 free(package_odd); 2413 2414 thread_odd = NULL; 2415 core_odd = NULL; 2416 package_odd = NULL; 2417 2418 free(output_buffer); 2419 output_buffer = NULL; 2420 outp = NULL; 2421 2422 free_fd_percpu(); 2423 2424 free(irq_column_2_cpu); 2425 free(irqs_per_cpu); 2426 2427 for (i = 0; i <= topo.max_cpu_num; ++i) { 2428 if (cpus[i].put_ids) 2429 CPU_FREE(cpus[i].put_ids); 2430 } 2431 free(cpus); 2432 } 2433 2434 2435 /* 2436 * Parse a file containing a single int. 2437 */ 2438 int parse_int_file(const char *fmt, ...) 2439 { 2440 va_list args; 2441 char path[PATH_MAX]; 2442 FILE *filep; 2443 int value; 2444 2445 va_start(args, fmt); 2446 vsnprintf(path, sizeof(path), fmt, args); 2447 va_end(args); 2448 filep = fopen_or_die(path, "r"); 2449 if (fscanf(filep, "%d", &value) != 1) 2450 err(1, "%s: failed to parse number from file", path); 2451 fclose(filep); 2452 return value; 2453 } 2454 2455 /* 2456 * cpu_is_first_core_in_package(cpu) 2457 * return 1 if given CPU is 1st core in package 2458 */ 2459 int cpu_is_first_core_in_package(int cpu) 2460 { 2461 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); 2462 } 2463 2464 int get_physical_package_id(int cpu) 2465 { 2466 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); 2467 } 2468 2469 int get_core_id(int cpu) 2470 { 2471 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); 2472 } 2473 2474 void set_node_data(void) 2475 { 2476 char path[80]; 2477 FILE *filep; 2478 int pkg, node, cpu; 2479 2480 struct pkg_node_info { 2481 int count; 2482 int min; 2483 } *pni; 2484 2485 pni = calloc(topo.num_packages, sizeof(struct pkg_node_info)); 2486 if (!pni) 2487 err(1, "calloc pkg_node_count"); 2488 2489 for (pkg = 0; pkg < topo.num_packages; pkg++) 2490 pni[pkg].min = topo.num_cpus; 2491 2492 for (node = 0; node <= topo.max_node_num; node++) { 2493 /* find the "first" cpu in the node */ 2494 sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node); 2495 filep = fopen(path, "r"); 2496 if (!filep) 2497 continue; 2498 fscanf(filep, "%d", &cpu); 2499 fclose(filep); 2500 2501 pkg = cpus[cpu].physical_package_id; 2502 pni[pkg].count++; 2503 2504 if (node < pni[pkg].min) 2505 pni[pkg].min = node; 2506 } 2507 2508 for (pkg = 0; pkg < topo.num_packages; pkg++) 2509 if (pni[pkg].count > topo.nodes_per_pkg) 2510 topo.nodes_per_pkg = pni[0].count; 2511 2512 /* Fake 1 node per pkg for machines that don't 2513 * expose nodes and thus avoid -nan results 2514 */ 2515 if (topo.nodes_per_pkg == 0) 2516 topo.nodes_per_pkg = 1; 2517 2518 for (cpu = 0; cpu < topo.num_cpus; cpu++) { 2519 pkg = cpus[cpu].physical_package_id; 2520 node = cpus[cpu].physical_node_id; 2521 cpus[cpu].logical_node_id = node - pni[pkg].min; 2522 } 2523 free(pni); 2524 2525 } 2526 2527 int get_physical_node_id(struct cpu_topology *thiscpu) 2528 { 2529 char path[80]; 2530 FILE *filep; 2531 int i; 2532 int cpu = thiscpu->logical_cpu_id; 2533 2534 for (i = 0; i <= topo.max_cpu_num; i++) { 2535 sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist", 2536 cpu, i); 2537 filep = fopen(path, "r"); 2538 if (!filep) 2539 continue; 2540 fclose(filep); 2541 return i; 2542 } 2543 return -1; 2544 } 2545 2546 int get_thread_siblings(struct cpu_topology *thiscpu) 2547 { 2548 char path[80], character; 2549 FILE *filep; 2550 unsigned long map; 2551 int so, shift, sib_core; 2552 int cpu = thiscpu->logical_cpu_id; 2553 int offset = topo.max_cpu_num + 1; 2554 size_t size; 2555 int thread_id = 0; 2556 2557 thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1)); 2558 if (thiscpu->thread_id < 0) 2559 thiscpu->thread_id = thread_id++; 2560 if (!thiscpu->put_ids) 2561 return -1; 2562 2563 size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 2564 CPU_ZERO_S(size, thiscpu->put_ids); 2565 2566 sprintf(path, 2567 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu); 2568 filep = fopen_or_die(path, "r"); 2569 do { 2570 offset -= BITMASK_SIZE; 2571 fscanf(filep, "%lx%c", &map, &character); 2572 for (shift = 0; shift < BITMASK_SIZE; shift++) { 2573 if ((map >> shift) & 0x1) { 2574 so = shift + offset; 2575 sib_core = get_core_id(so); 2576 if (sib_core == thiscpu->physical_core_id) { 2577 CPU_SET_S(so, size, thiscpu->put_ids); 2578 if ((so != cpu) && 2579 (cpus[so].thread_id < 0)) 2580 cpus[so].thread_id = 2581 thread_id++; 2582 } 2583 } 2584 } 2585 } while (!strncmp(&character, ",", 1)); 2586 fclose(filep); 2587 2588 return CPU_COUNT_S(size, thiscpu->put_ids); 2589 } 2590 2591 /* 2592 * run func(thread, core, package) in topology order 2593 * skip non-present cpus 2594 */ 2595 2596 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, 2597 struct pkg_data *, struct thread_data *, struct core_data *, 2598 struct pkg_data *), struct thread_data *thread_base, 2599 struct core_data *core_base, struct pkg_data *pkg_base, 2600 struct thread_data *thread_base2, struct core_data *core_base2, 2601 struct pkg_data *pkg_base2) 2602 { 2603 int retval, pkg_no, node_no, core_no, thread_no; 2604 2605 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 2606 for (node_no = 0; node_no < topo.nodes_per_pkg; ++node_no) { 2607 for (core_no = 0; core_no < topo.cores_per_node; 2608 ++core_no) { 2609 for (thread_no = 0; thread_no < 2610 topo.threads_per_core; ++thread_no) { 2611 struct thread_data *t, *t2; 2612 struct core_data *c, *c2; 2613 struct pkg_data *p, *p2; 2614 2615 t = GET_THREAD(thread_base, thread_no, 2616 core_no, node_no, 2617 pkg_no); 2618 2619 if (cpu_is_not_present(t->cpu_id)) 2620 continue; 2621 2622 t2 = GET_THREAD(thread_base2, thread_no, 2623 core_no, node_no, 2624 pkg_no); 2625 2626 c = GET_CORE(core_base, core_no, 2627 node_no, pkg_no); 2628 c2 = GET_CORE(core_base2, core_no, 2629 node_no, 2630 pkg_no); 2631 2632 p = GET_PKG(pkg_base, pkg_no); 2633 p2 = GET_PKG(pkg_base2, pkg_no); 2634 2635 retval = func(t, c, p, t2, c2, p2); 2636 if (retval) 2637 return retval; 2638 } 2639 } 2640 } 2641 } 2642 return 0; 2643 } 2644 2645 /* 2646 * run func(cpu) on every cpu in /proc/stat 2647 * return max_cpu number 2648 */ 2649 int for_all_proc_cpus(int (func)(int)) 2650 { 2651 FILE *fp; 2652 int cpu_num; 2653 int retval; 2654 2655 fp = fopen_or_die(proc_stat, "r"); 2656 2657 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); 2658 if (retval != 0) 2659 err(1, "%s: failed to parse format", proc_stat); 2660 2661 while (1) { 2662 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); 2663 if (retval != 1) 2664 break; 2665 2666 retval = func(cpu_num); 2667 if (retval) { 2668 fclose(fp); 2669 return(retval); 2670 } 2671 } 2672 fclose(fp); 2673 return 0; 2674 } 2675 2676 void re_initialize(void) 2677 { 2678 free_all_buffers(); 2679 setup_all_buffers(); 2680 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus); 2681 } 2682 2683 void set_max_cpu_num(void) 2684 { 2685 FILE *filep; 2686 unsigned long dummy; 2687 2688 topo.max_cpu_num = 0; 2689 filep = fopen_or_die( 2690 "/sys/devices/system/cpu/cpu0/topology/thread_siblings", 2691 "r"); 2692 while (fscanf(filep, "%lx,", &dummy) == 1) 2693 topo.max_cpu_num += BITMASK_SIZE; 2694 fclose(filep); 2695 topo.max_cpu_num--; /* 0 based */ 2696 } 2697 2698 /* 2699 * count_cpus() 2700 * remember the last one seen, it will be the max 2701 */ 2702 int count_cpus(int cpu) 2703 { 2704 topo.num_cpus++; 2705 return 0; 2706 } 2707 int mark_cpu_present(int cpu) 2708 { 2709 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); 2710 return 0; 2711 } 2712 2713 int init_thread_id(int cpu) 2714 { 2715 cpus[cpu].thread_id = -1; 2716 return 0; 2717 } 2718 2719 /* 2720 * snapshot_proc_interrupts() 2721 * 2722 * read and record summary of /proc/interrupts 2723 * 2724 * return 1 if config change requires a restart, else return 0 2725 */ 2726 int snapshot_proc_interrupts(void) 2727 { 2728 static FILE *fp; 2729 int column, retval; 2730 2731 if (fp == NULL) 2732 fp = fopen_or_die("/proc/interrupts", "r"); 2733 else 2734 rewind(fp); 2735 2736 /* read 1st line of /proc/interrupts to get cpu* name for each column */ 2737 for (column = 0; column < topo.num_cpus; ++column) { 2738 int cpu_number; 2739 2740 retval = fscanf(fp, " CPU%d", &cpu_number); 2741 if (retval != 1) 2742 break; 2743 2744 if (cpu_number > topo.max_cpu_num) { 2745 warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num); 2746 return 1; 2747 } 2748 2749 irq_column_2_cpu[column] = cpu_number; 2750 irqs_per_cpu[cpu_number] = 0; 2751 } 2752 2753 /* read /proc/interrupt count lines and sum up irqs per cpu */ 2754 while (1) { 2755 int column; 2756 char buf[64]; 2757 2758 retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */ 2759 if (retval != 1) 2760 break; 2761 2762 /* read the count per cpu */ 2763 for (column = 0; column < topo.num_cpus; ++column) { 2764 2765 int cpu_number, irq_count; 2766 2767 retval = fscanf(fp, " %d", &irq_count); 2768 if (retval != 1) 2769 break; 2770 2771 cpu_number = irq_column_2_cpu[column]; 2772 irqs_per_cpu[cpu_number] += irq_count; 2773 2774 } 2775 2776 while (getc(fp) != '\n') 2777 ; /* flush interrupt description */ 2778 2779 } 2780 return 0; 2781 } 2782 /* 2783 * snapshot_gfx_rc6_ms() 2784 * 2785 * record snapshot of 2786 * /sys/class/drm/card0/power/rc6_residency_ms 2787 * 2788 * return 1 if config change requires a restart, else return 0 2789 */ 2790 int snapshot_gfx_rc6_ms(void) 2791 { 2792 FILE *fp; 2793 int retval; 2794 2795 fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r"); 2796 2797 retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms); 2798 if (retval != 1) 2799 err(1, "GFX rc6"); 2800 2801 fclose(fp); 2802 2803 return 0; 2804 } 2805 /* 2806 * snapshot_gfx_mhz() 2807 * 2808 * record snapshot of 2809 * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz 2810 * 2811 * return 1 if config change requires a restart, else return 0 2812 */ 2813 int snapshot_gfx_mhz(void) 2814 { 2815 static FILE *fp; 2816 int retval; 2817 2818 if (fp == NULL) 2819 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); 2820 else { 2821 rewind(fp); 2822 fflush(fp); 2823 } 2824 2825 retval = fscanf(fp, "%d", &gfx_cur_mhz); 2826 if (retval != 1) 2827 err(1, "GFX MHz"); 2828 2829 return 0; 2830 } 2831 2832 /* 2833 * snapshot_cpu_lpi() 2834 * 2835 * record snapshot of 2836 * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us 2837 * 2838 * return 1 if config change requires a restart, else return 0 2839 */ 2840 int snapshot_cpu_lpi_us(void) 2841 { 2842 FILE *fp; 2843 int retval; 2844 2845 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); 2846 2847 retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); 2848 if (retval != 1) 2849 err(1, "CPU LPI"); 2850 2851 fclose(fp); 2852 2853 return 0; 2854 } 2855 /* 2856 * snapshot_sys_lpi() 2857 * 2858 * record snapshot of 2859 * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us 2860 * 2861 * return 1 if config change requires a restart, else return 0 2862 */ 2863 int snapshot_sys_lpi_us(void) 2864 { 2865 FILE *fp; 2866 int retval; 2867 2868 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); 2869 2870 retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); 2871 if (retval != 1) 2872 err(1, "SYS LPI"); 2873 2874 fclose(fp); 2875 2876 return 0; 2877 } 2878 /* 2879 * snapshot /proc and /sys files 2880 * 2881 * return 1 if configuration restart needed, else return 0 2882 */ 2883 int snapshot_proc_sysfs_files(void) 2884 { 2885 if (DO_BIC(BIC_IRQ)) 2886 if (snapshot_proc_interrupts()) 2887 return 1; 2888 2889 if (DO_BIC(BIC_GFX_rc6)) 2890 snapshot_gfx_rc6_ms(); 2891 2892 if (DO_BIC(BIC_GFXMHz)) 2893 snapshot_gfx_mhz(); 2894 2895 if (DO_BIC(BIC_CPU_LPI)) 2896 snapshot_cpu_lpi_us(); 2897 2898 if (DO_BIC(BIC_SYS_LPI)) 2899 snapshot_sys_lpi_us(); 2900 2901 return 0; 2902 } 2903 2904 int exit_requested; 2905 2906 static void signal_handler (int signal) 2907 { 2908 switch (signal) { 2909 case SIGINT: 2910 exit_requested = 1; 2911 if (debug) 2912 fprintf(stderr, " SIGINT\n"); 2913 break; 2914 case SIGUSR1: 2915 if (debug > 1) 2916 fprintf(stderr, "SIGUSR1\n"); 2917 break; 2918 } 2919 /* make sure this manually-invoked interval is at least 1ms long */ 2920 nanosleep(&one_msec, NULL); 2921 } 2922 2923 void setup_signal_handler(void) 2924 { 2925 struct sigaction sa; 2926 2927 memset(&sa, 0, sizeof(sa)); 2928 2929 sa.sa_handler = &signal_handler; 2930 2931 if (sigaction(SIGINT, &sa, NULL) < 0) 2932 err(1, "sigaction SIGINT"); 2933 if (sigaction(SIGUSR1, &sa, NULL) < 0) 2934 err(1, "sigaction SIGUSR1"); 2935 } 2936 2937 void do_sleep(void) 2938 { 2939 struct timeval select_timeout; 2940 fd_set readfds; 2941 int retval; 2942 2943 FD_ZERO(&readfds); 2944 FD_SET(0, &readfds); 2945 2946 if (!isatty(fileno(stdin))) { 2947 nanosleep(&interval_ts, NULL); 2948 return; 2949 } 2950 2951 select_timeout = interval_tv; 2952 retval = select(1, &readfds, NULL, NULL, &select_timeout); 2953 2954 if (retval == 1) { 2955 switch (getc(stdin)) { 2956 case 'q': 2957 exit_requested = 1; 2958 break; 2959 } 2960 /* make sure this manually-invoked interval is at least 1ms long */ 2961 nanosleep(&one_msec, NULL); 2962 } 2963 } 2964 2965 2966 void turbostat_loop() 2967 { 2968 int retval; 2969 int restarted = 0; 2970 int done_iters = 0; 2971 2972 setup_signal_handler(); 2973 2974 restart: 2975 restarted++; 2976 2977 snapshot_proc_sysfs_files(); 2978 retval = for_all_cpus(get_counters, EVEN_COUNTERS); 2979 first_counter_read = 0; 2980 if (retval < -1) { 2981 exit(retval); 2982 } else if (retval == -1) { 2983 if (restarted > 1) { 2984 exit(retval); 2985 } 2986 re_initialize(); 2987 goto restart; 2988 } 2989 restarted = 0; 2990 done_iters = 0; 2991 gettimeofday(&tv_even, (struct timezone *)NULL); 2992 2993 while (1) { 2994 if (for_all_proc_cpus(cpu_is_not_present)) { 2995 re_initialize(); 2996 goto restart; 2997 } 2998 do_sleep(); 2999 if (snapshot_proc_sysfs_files()) 3000 goto restart; 3001 retval = for_all_cpus(get_counters, ODD_COUNTERS); 3002 if (retval < -1) { 3003 exit(retval); 3004 } else if (retval == -1) { 3005 re_initialize(); 3006 goto restart; 3007 } 3008 gettimeofday(&tv_odd, (struct timezone *)NULL); 3009 timersub(&tv_odd, &tv_even, &tv_delta); 3010 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) { 3011 re_initialize(); 3012 goto restart; 3013 } 3014 compute_average(EVEN_COUNTERS); 3015 format_all_counters(EVEN_COUNTERS); 3016 flush_output_stdout(); 3017 if (exit_requested) 3018 break; 3019 if (num_iterations && ++done_iters >= num_iterations) 3020 break; 3021 do_sleep(); 3022 if (snapshot_proc_sysfs_files()) 3023 goto restart; 3024 retval = for_all_cpus(get_counters, EVEN_COUNTERS); 3025 if (retval < -1) { 3026 exit(retval); 3027 } else if (retval == -1) { 3028 re_initialize(); 3029 goto restart; 3030 } 3031 gettimeofday(&tv_even, (struct timezone *)NULL); 3032 timersub(&tv_even, &tv_odd, &tv_delta); 3033 if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) { 3034 re_initialize(); 3035 goto restart; 3036 } 3037 compute_average(ODD_COUNTERS); 3038 format_all_counters(ODD_COUNTERS); 3039 flush_output_stdout(); 3040 if (exit_requested) 3041 break; 3042 if (num_iterations && ++done_iters >= num_iterations) 3043 break; 3044 } 3045 } 3046 3047 void check_dev_msr() 3048 { 3049 struct stat sb; 3050 char pathname[32]; 3051 3052 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 3053 if (stat(pathname, &sb)) 3054 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 3055 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 3056 } 3057 3058 void check_permissions() 3059 { 3060 struct __user_cap_header_struct cap_header_data; 3061 cap_user_header_t cap_header = &cap_header_data; 3062 struct __user_cap_data_struct cap_data_data; 3063 cap_user_data_t cap_data = &cap_data_data; 3064 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); 3065 int do_exit = 0; 3066 char pathname[32]; 3067 3068 /* check for CAP_SYS_RAWIO */ 3069 cap_header->pid = getpid(); 3070 cap_header->version = _LINUX_CAPABILITY_VERSION; 3071 if (capget(cap_header, cap_data) < 0) 3072 err(-6, "capget(2) failed"); 3073 3074 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) { 3075 do_exit++; 3076 warnx("capget(CAP_SYS_RAWIO) failed," 3077 " try \"# setcap cap_sys_rawio=ep %s\"", progname); 3078 } 3079 3080 /* test file permissions */ 3081 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 3082 if (euidaccess(pathname, R_OK)) { 3083 do_exit++; 3084 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); 3085 } 3086 3087 /* if all else fails, thell them to be root */ 3088 if (do_exit) 3089 if (getuid() != 0) 3090 warnx("... or simply run as root"); 3091 3092 if (do_exit) 3093 exit(-6); 3094 } 3095 3096 /* 3097 * NHM adds support for additional MSRs: 3098 * 3099 * MSR_SMI_COUNT 0x00000034 3100 * 3101 * MSR_PLATFORM_INFO 0x000000ce 3102 * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 3103 * 3104 * MSR_MISC_PWR_MGMT 0x000001aa 3105 * 3106 * MSR_PKG_C3_RESIDENCY 0x000003f8 3107 * MSR_PKG_C6_RESIDENCY 0x000003f9 3108 * MSR_CORE_C3_RESIDENCY 0x000003fc 3109 * MSR_CORE_C6_RESIDENCY 0x000003fd 3110 * 3111 * Side effect: 3112 * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL 3113 * sets has_misc_feature_control 3114 */ 3115 int probe_nhm_msrs(unsigned int family, unsigned int model) 3116 { 3117 unsigned long long msr; 3118 unsigned int base_ratio; 3119 int *pkg_cstate_limits; 3120 3121 if (!genuine_intel) 3122 return 0; 3123 3124 if (family != 6) 3125 return 0; 3126 3127 bclk = discover_bclk(family, model); 3128 3129 switch (model) { 3130 case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ 3131 case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ 3132 case 0x1F: /* Core i7 and i5 Processor - Nehalem */ 3133 case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */ 3134 case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */ 3135 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ 3136 case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */ 3137 pkg_cstate_limits = nhm_pkg_cstate_limits; 3138 break; 3139 case INTEL_FAM6_SANDYBRIDGE: /* SNB */ 3140 case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */ 3141 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 3142 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 3143 pkg_cstate_limits = snb_pkg_cstate_limits; 3144 has_misc_feature_control = 1; 3145 break; 3146 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3147 case INTEL_FAM6_HASWELL_X: /* HSX */ 3148 case INTEL_FAM6_HASWELL_ULT: /* HSW */ 3149 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3150 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3151 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3152 case INTEL_FAM6_BROADWELL_X: /* BDX */ 3153 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 3154 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 3155 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 3156 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 3157 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 3158 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 3159 pkg_cstate_limits = hsw_pkg_cstate_limits; 3160 has_misc_feature_control = 1; 3161 break; 3162 case INTEL_FAM6_SKYLAKE_X: /* SKX */ 3163 pkg_cstate_limits = skx_pkg_cstate_limits; 3164 has_misc_feature_control = 1; 3165 break; 3166 case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ 3167 no_MSR_MISC_PWR_MGMT = 1; 3168 case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ 3169 pkg_cstate_limits = slv_pkg_cstate_limits; 3170 break; 3171 case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ 3172 pkg_cstate_limits = amt_pkg_cstate_limits; 3173 no_MSR_MISC_PWR_MGMT = 1; 3174 break; 3175 case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ 3176 case INTEL_FAM6_XEON_PHI_KNM: 3177 pkg_cstate_limits = phi_pkg_cstate_limits; 3178 break; 3179 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ 3180 case INTEL_FAM6_ATOM_GEMINI_LAKE: 3181 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ 3182 pkg_cstate_limits = bxt_pkg_cstate_limits; 3183 break; 3184 default: 3185 return 0; 3186 } 3187 get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); 3188 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 3189 3190 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 3191 base_ratio = (msr >> 8) & 0xFF; 3192 3193 base_hz = base_ratio * bclk * 1000000; 3194 has_base_hz = 1; 3195 return 1; 3196 } 3197 /* 3198 * SLV client has support for unique MSRs: 3199 * 3200 * MSR_CC6_DEMOTION_POLICY_CONFIG 3201 * MSR_MC6_DEMOTION_POLICY_CONFIG 3202 */ 3203 3204 int has_slv_msrs(unsigned int family, unsigned int model) 3205 { 3206 if (!genuine_intel) 3207 return 0; 3208 3209 switch (model) { 3210 case INTEL_FAM6_ATOM_SILVERMONT1: 3211 case INTEL_FAM6_ATOM_MERRIFIELD: 3212 case INTEL_FAM6_ATOM_MOOREFIELD: 3213 return 1; 3214 } 3215 return 0; 3216 } 3217 int is_dnv(unsigned int family, unsigned int model) 3218 { 3219 3220 if (!genuine_intel) 3221 return 0; 3222 3223 switch (model) { 3224 case INTEL_FAM6_ATOM_DENVERTON: 3225 return 1; 3226 } 3227 return 0; 3228 } 3229 int is_bdx(unsigned int family, unsigned int model) 3230 { 3231 3232 if (!genuine_intel) 3233 return 0; 3234 3235 switch (model) { 3236 case INTEL_FAM6_BROADWELL_X: 3237 case INTEL_FAM6_BROADWELL_XEON_D: 3238 return 1; 3239 } 3240 return 0; 3241 } 3242 int is_skx(unsigned int family, unsigned int model) 3243 { 3244 3245 if (!genuine_intel) 3246 return 0; 3247 3248 switch (model) { 3249 case INTEL_FAM6_SKYLAKE_X: 3250 return 1; 3251 } 3252 return 0; 3253 } 3254 3255 int has_turbo_ratio_limit(unsigned int family, unsigned int model) 3256 { 3257 if (has_slv_msrs(family, model)) 3258 return 0; 3259 3260 switch (model) { 3261 /* Nehalem compatible, but do not include turbo-ratio limit support */ 3262 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ 3263 case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */ 3264 case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */ 3265 case INTEL_FAM6_XEON_PHI_KNM: 3266 return 0; 3267 default: 3268 return 1; 3269 } 3270 } 3271 int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model) 3272 { 3273 if (has_slv_msrs(family, model)) 3274 return 1; 3275 3276 return 0; 3277 } 3278 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) 3279 { 3280 if (!genuine_intel) 3281 return 0; 3282 3283 if (family != 6) 3284 return 0; 3285 3286 switch (model) { 3287 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 3288 case INTEL_FAM6_HASWELL_X: /* HSW Xeon */ 3289 return 1; 3290 default: 3291 return 0; 3292 } 3293 } 3294 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) 3295 { 3296 if (!genuine_intel) 3297 return 0; 3298 3299 if (family != 6) 3300 return 0; 3301 3302 switch (model) { 3303 case INTEL_FAM6_HASWELL_X: /* HSW Xeon */ 3304 return 1; 3305 default: 3306 return 0; 3307 } 3308 } 3309 3310 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) 3311 { 3312 if (!genuine_intel) 3313 return 0; 3314 3315 if (family != 6) 3316 return 0; 3317 3318 switch (model) { 3319 case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ 3320 case INTEL_FAM6_XEON_PHI_KNM: 3321 return 1; 3322 default: 3323 return 0; 3324 } 3325 } 3326 int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model) 3327 { 3328 if (!genuine_intel) 3329 return 0; 3330 3331 if (family != 6) 3332 return 0; 3333 3334 switch (model) { 3335 case INTEL_FAM6_ATOM_GOLDMONT: 3336 case INTEL_FAM6_SKYLAKE_X: 3337 return 1; 3338 default: 3339 return 0; 3340 } 3341 } 3342 int has_config_tdp(unsigned int family, unsigned int model) 3343 { 3344 if (!genuine_intel) 3345 return 0; 3346 3347 if (family != 6) 3348 return 0; 3349 3350 switch (model) { 3351 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 3352 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3353 case INTEL_FAM6_HASWELL_X: /* HSX */ 3354 case INTEL_FAM6_HASWELL_ULT: /* HSW */ 3355 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3356 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3357 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3358 case INTEL_FAM6_BROADWELL_X: /* BDX */ 3359 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 3360 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 3361 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 3362 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 3363 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 3364 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 3365 case INTEL_FAM6_SKYLAKE_X: /* SKX */ 3366 3367 case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ 3368 case INTEL_FAM6_XEON_PHI_KNM: 3369 return 1; 3370 default: 3371 return 0; 3372 } 3373 } 3374 3375 static void 3376 dump_cstate_pstate_config_info(unsigned int family, unsigned int model) 3377 { 3378 if (!do_nhm_platform_info) 3379 return; 3380 3381 dump_nhm_platform_info(); 3382 3383 if (has_hsw_turbo_ratio_limit(family, model)) 3384 dump_hsw_turbo_ratio_limits(); 3385 3386 if (has_ivt_turbo_ratio_limit(family, model)) 3387 dump_ivt_turbo_ratio_limits(); 3388 3389 if (has_turbo_ratio_limit(family, model)) 3390 dump_turbo_ratio_limits(family, model); 3391 3392 if (has_atom_turbo_ratio_limit(family, model)) 3393 dump_atom_turbo_ratio_limits(); 3394 3395 if (has_knl_turbo_ratio_limit(family, model)) 3396 dump_knl_turbo_ratio_limits(); 3397 3398 if (has_config_tdp(family, model)) 3399 dump_config_tdp(); 3400 3401 dump_nhm_cst_cfg(); 3402 } 3403 3404 static void 3405 dump_sysfs_cstate_config(void) 3406 { 3407 char path[64]; 3408 char name_buf[16]; 3409 char desc[64]; 3410 FILE *input; 3411 int state; 3412 char *sp; 3413 3414 if (!DO_BIC(BIC_sysfs)) 3415 return; 3416 3417 for (state = 0; state < 10; ++state) { 3418 3419 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", 3420 base_cpu, state); 3421 input = fopen(path, "r"); 3422 if (input == NULL) 3423 continue; 3424 fgets(name_buf, sizeof(name_buf), input); 3425 3426 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 3427 sp = strchr(name_buf, '-'); 3428 if (!sp) 3429 sp = strchrnul(name_buf, '\n'); 3430 *sp = '\0'; 3431 3432 fclose(input); 3433 3434 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", 3435 base_cpu, state); 3436 input = fopen(path, "r"); 3437 if (input == NULL) 3438 continue; 3439 fgets(desc, sizeof(desc), input); 3440 3441 fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); 3442 fclose(input); 3443 } 3444 } 3445 static void 3446 dump_sysfs_pstate_config(void) 3447 { 3448 char path[64]; 3449 char driver_buf[64]; 3450 char governor_buf[64]; 3451 FILE *input; 3452 int turbo; 3453 3454 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver", 3455 base_cpu); 3456 input = fopen(path, "r"); 3457 if (input == NULL) { 3458 fprintf(stderr, "NSFOD %s\n", path); 3459 return; 3460 } 3461 fgets(driver_buf, sizeof(driver_buf), input); 3462 fclose(input); 3463 3464 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", 3465 base_cpu); 3466 input = fopen(path, "r"); 3467 if (input == NULL) { 3468 fprintf(stderr, "NSFOD %s\n", path); 3469 return; 3470 } 3471 fgets(governor_buf, sizeof(governor_buf), input); 3472 fclose(input); 3473 3474 fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); 3475 fprintf(outf, "cpu%d: cpufreq governor: %s", base_cpu, governor_buf); 3476 3477 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); 3478 input = fopen(path, "r"); 3479 if (input != NULL) { 3480 fscanf(input, "%d", &turbo); 3481 fprintf(outf, "cpufreq boost: %d\n", turbo); 3482 fclose(input); 3483 } 3484 3485 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); 3486 input = fopen(path, "r"); 3487 if (input != NULL) { 3488 fscanf(input, "%d", &turbo); 3489 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); 3490 fclose(input); 3491 } 3492 } 3493 3494 3495 /* 3496 * print_epb() 3497 * Decode the ENERGY_PERF_BIAS MSR 3498 */ 3499 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3500 { 3501 unsigned long long msr; 3502 char *epb_string; 3503 int cpu; 3504 3505 if (!has_epb) 3506 return 0; 3507 3508 cpu = t->cpu_id; 3509 3510 /* EPB is per-package */ 3511 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 3512 return 0; 3513 3514 if (cpu_migrate(cpu)) { 3515 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 3516 return -1; 3517 } 3518 3519 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) 3520 return 0; 3521 3522 switch (msr & 0xF) { 3523 case ENERGY_PERF_BIAS_PERFORMANCE: 3524 epb_string = "performance"; 3525 break; 3526 case ENERGY_PERF_BIAS_NORMAL: 3527 epb_string = "balanced"; 3528 break; 3529 case ENERGY_PERF_BIAS_POWERSAVE: 3530 epb_string = "powersave"; 3531 break; 3532 default: 3533 epb_string = "custom"; 3534 break; 3535 } 3536 fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string); 3537 3538 return 0; 3539 } 3540 /* 3541 * print_hwp() 3542 * Decode the MSR_HWP_CAPABILITIES 3543 */ 3544 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3545 { 3546 unsigned long long msr; 3547 int cpu; 3548 3549 if (!has_hwp) 3550 return 0; 3551 3552 cpu = t->cpu_id; 3553 3554 /* MSR_HWP_CAPABILITIES is per-package */ 3555 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 3556 return 0; 3557 3558 if (cpu_migrate(cpu)) { 3559 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 3560 return -1; 3561 } 3562 3563 if (get_msr(cpu, MSR_PM_ENABLE, &msr)) 3564 return 0; 3565 3566 fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", 3567 cpu, msr, (msr & (1 << 0)) ? "" : "No-"); 3568 3569 /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */ 3570 if ((msr & (1 << 0)) == 0) 3571 return 0; 3572 3573 if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr)) 3574 return 0; 3575 3576 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " 3577 "(high %d guar %d eff %d low %d)\n", 3578 cpu, msr, 3579 (unsigned int)HWP_HIGHEST_PERF(msr), 3580 (unsigned int)HWP_GUARANTEED_PERF(msr), 3581 (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), 3582 (unsigned int)HWP_LOWEST_PERF(msr)); 3583 3584 if (get_msr(cpu, MSR_HWP_REQUEST, &msr)) 3585 return 0; 3586 3587 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " 3588 "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n", 3589 cpu, msr, 3590 (unsigned int)(((msr) >> 0) & 0xff), 3591 (unsigned int)(((msr) >> 8) & 0xff), 3592 (unsigned int)(((msr) >> 16) & 0xff), 3593 (unsigned int)(((msr) >> 24) & 0xff), 3594 (unsigned int)(((msr) >> 32) & 0xff3), 3595 (unsigned int)(((msr) >> 42) & 0x1)); 3596 3597 if (has_hwp_pkg) { 3598 if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr)) 3599 return 0; 3600 3601 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " 3602 "(min %d max %d des %d epp 0x%x window 0x%x)\n", 3603 cpu, msr, 3604 (unsigned int)(((msr) >> 0) & 0xff), 3605 (unsigned int)(((msr) >> 8) & 0xff), 3606 (unsigned int)(((msr) >> 16) & 0xff), 3607 (unsigned int)(((msr) >> 24) & 0xff), 3608 (unsigned int)(((msr) >> 32) & 0xff3)); 3609 } 3610 if (has_hwp_notify) { 3611 if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr)) 3612 return 0; 3613 3614 fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx " 3615 "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", 3616 cpu, msr, 3617 ((msr) & 0x1) ? "EN" : "Dis", 3618 ((msr) & 0x2) ? "EN" : "Dis"); 3619 } 3620 if (get_msr(cpu, MSR_HWP_STATUS, &msr)) 3621 return 0; 3622 3623 fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx " 3624 "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", 3625 cpu, msr, 3626 ((msr) & 0x1) ? "" : "No-", 3627 ((msr) & 0x2) ? "" : "No-"); 3628 3629 return 0; 3630 } 3631 3632 /* 3633 * print_perf_limit() 3634 */ 3635 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3636 { 3637 unsigned long long msr; 3638 int cpu; 3639 3640 cpu = t->cpu_id; 3641 3642 /* per-package */ 3643 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 3644 return 0; 3645 3646 if (cpu_migrate(cpu)) { 3647 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 3648 return -1; 3649 } 3650 3651 if (do_core_perf_limit_reasons) { 3652 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr); 3653 fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 3654 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)", 3655 (msr & 1 << 15) ? "bit15, " : "", 3656 (msr & 1 << 14) ? "bit14, " : "", 3657 (msr & 1 << 13) ? "Transitions, " : "", 3658 (msr & 1 << 12) ? "MultiCoreTurbo, " : "", 3659 (msr & 1 << 11) ? "PkgPwrL2, " : "", 3660 (msr & 1 << 10) ? "PkgPwrL1, " : "", 3661 (msr & 1 << 9) ? "CorePwr, " : "", 3662 (msr & 1 << 8) ? "Amps, " : "", 3663 (msr & 1 << 6) ? "VR-Therm, " : "", 3664 (msr & 1 << 5) ? "Auto-HWP, " : "", 3665 (msr & 1 << 4) ? "Graphics, " : "", 3666 (msr & 1 << 2) ? "bit2, " : "", 3667 (msr & 1 << 1) ? "ThermStatus, " : "", 3668 (msr & 1 << 0) ? "PROCHOT, " : ""); 3669 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", 3670 (msr & 1 << 31) ? "bit31, " : "", 3671 (msr & 1 << 30) ? "bit30, " : "", 3672 (msr & 1 << 29) ? "Transitions, " : "", 3673 (msr & 1 << 28) ? "MultiCoreTurbo, " : "", 3674 (msr & 1 << 27) ? "PkgPwrL2, " : "", 3675 (msr & 1 << 26) ? "PkgPwrL1, " : "", 3676 (msr & 1 << 25) ? "CorePwr, " : "", 3677 (msr & 1 << 24) ? "Amps, " : "", 3678 (msr & 1 << 22) ? "VR-Therm, " : "", 3679 (msr & 1 << 21) ? "Auto-HWP, " : "", 3680 (msr & 1 << 20) ? "Graphics, " : "", 3681 (msr & 1 << 18) ? "bit18, " : "", 3682 (msr & 1 << 17) ? "ThermStatus, " : "", 3683 (msr & 1 << 16) ? "PROCHOT, " : ""); 3684 3685 } 3686 if (do_gfx_perf_limit_reasons) { 3687 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr); 3688 fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 3689 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)", 3690 (msr & 1 << 0) ? "PROCHOT, " : "", 3691 (msr & 1 << 1) ? "ThermStatus, " : "", 3692 (msr & 1 << 4) ? "Graphics, " : "", 3693 (msr & 1 << 6) ? "VR-Therm, " : "", 3694 (msr & 1 << 8) ? "Amps, " : "", 3695 (msr & 1 << 9) ? "GFXPwr, " : "", 3696 (msr & 1 << 10) ? "PkgPwrL1, " : "", 3697 (msr & 1 << 11) ? "PkgPwrL2, " : ""); 3698 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n", 3699 (msr & 1 << 16) ? "PROCHOT, " : "", 3700 (msr & 1 << 17) ? "ThermStatus, " : "", 3701 (msr & 1 << 20) ? "Graphics, " : "", 3702 (msr & 1 << 22) ? "VR-Therm, " : "", 3703 (msr & 1 << 24) ? "Amps, " : "", 3704 (msr & 1 << 25) ? "GFXPwr, " : "", 3705 (msr & 1 << 26) ? "PkgPwrL1, " : "", 3706 (msr & 1 << 27) ? "PkgPwrL2, " : ""); 3707 } 3708 if (do_ring_perf_limit_reasons) { 3709 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); 3710 fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); 3711 fprintf(outf, " (Active: %s%s%s%s%s%s)", 3712 (msr & 1 << 0) ? "PROCHOT, " : "", 3713 (msr & 1 << 1) ? "ThermStatus, " : "", 3714 (msr & 1 << 6) ? "VR-Therm, " : "", 3715 (msr & 1 << 8) ? "Amps, " : "", 3716 (msr & 1 << 10) ? "PkgPwrL1, " : "", 3717 (msr & 1 << 11) ? "PkgPwrL2, " : ""); 3718 fprintf(outf, " (Logged: %s%s%s%s%s%s)\n", 3719 (msr & 1 << 16) ? "PROCHOT, " : "", 3720 (msr & 1 << 17) ? "ThermStatus, " : "", 3721 (msr & 1 << 22) ? "VR-Therm, " : "", 3722 (msr & 1 << 24) ? "Amps, " : "", 3723 (msr & 1 << 26) ? "PkgPwrL1, " : "", 3724 (msr & 1 << 27) ? "PkgPwrL2, " : ""); 3725 } 3726 return 0; 3727 } 3728 3729 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 3730 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 3731 3732 double get_tdp(unsigned int model) 3733 { 3734 unsigned long long msr; 3735 3736 if (do_rapl & RAPL_PKG_POWER_INFO) 3737 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) 3738 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; 3739 3740 switch (model) { 3741 case INTEL_FAM6_ATOM_SILVERMONT1: 3742 case INTEL_FAM6_ATOM_SILVERMONT2: 3743 return 30.0; 3744 default: 3745 return 135.0; 3746 } 3747 } 3748 3749 /* 3750 * rapl_dram_energy_units_probe() 3751 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 3752 */ 3753 static double 3754 rapl_dram_energy_units_probe(int model, double rapl_energy_units) 3755 { 3756 /* only called for genuine_intel, family 6 */ 3757 3758 switch (model) { 3759 case INTEL_FAM6_HASWELL_X: /* HSX */ 3760 case INTEL_FAM6_BROADWELL_X: /* BDX */ 3761 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 3762 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ 3763 case INTEL_FAM6_XEON_PHI_KNM: 3764 return (rapl_dram_energy_units = 15.3 / 1000000); 3765 default: 3766 return (rapl_energy_units); 3767 } 3768 } 3769 3770 3771 /* 3772 * rapl_probe() 3773 * 3774 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units 3775 */ 3776 void rapl_probe(unsigned int family, unsigned int model) 3777 { 3778 unsigned long long msr; 3779 unsigned int time_unit; 3780 double tdp; 3781 3782 if (!genuine_intel) 3783 return; 3784 3785 if (family != 6) 3786 return; 3787 3788 switch (model) { 3789 case INTEL_FAM6_SANDYBRIDGE: 3790 case INTEL_FAM6_IVYBRIDGE: 3791 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3792 case INTEL_FAM6_HASWELL_ULT: /* HSW */ 3793 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3794 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3795 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3796 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; 3797 if (rapl_joules) { 3798 BIC_PRESENT(BIC_Pkg_J); 3799 BIC_PRESENT(BIC_Cor_J); 3800 BIC_PRESENT(BIC_GFX_J); 3801 } else { 3802 BIC_PRESENT(BIC_PkgWatt); 3803 BIC_PRESENT(BIC_CorWatt); 3804 BIC_PRESENT(BIC_GFXWatt); 3805 } 3806 break; 3807 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ 3808 case INTEL_FAM6_ATOM_GEMINI_LAKE: 3809 do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; 3810 if (rapl_joules) 3811 BIC_PRESENT(BIC_Pkg_J); 3812 else 3813 BIC_PRESENT(BIC_PkgWatt); 3814 break; 3815 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 3816 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 3817 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 3818 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 3819 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 3820 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; 3821 BIC_PRESENT(BIC_PKG__); 3822 BIC_PRESENT(BIC_RAM__); 3823 if (rapl_joules) { 3824 BIC_PRESENT(BIC_Pkg_J); 3825 BIC_PRESENT(BIC_Cor_J); 3826 BIC_PRESENT(BIC_RAM_J); 3827 BIC_PRESENT(BIC_GFX_J); 3828 } else { 3829 BIC_PRESENT(BIC_PkgWatt); 3830 BIC_PRESENT(BIC_CorWatt); 3831 BIC_PRESENT(BIC_RAMWatt); 3832 BIC_PRESENT(BIC_GFXWatt); 3833 } 3834 break; 3835 case INTEL_FAM6_HASWELL_X: /* HSX */ 3836 case INTEL_FAM6_BROADWELL_X: /* BDX */ 3837 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 3838 case INTEL_FAM6_SKYLAKE_X: /* SKX */ 3839 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ 3840 case INTEL_FAM6_XEON_PHI_KNM: 3841 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 3842 BIC_PRESENT(BIC_PKG__); 3843 BIC_PRESENT(BIC_RAM__); 3844 if (rapl_joules) { 3845 BIC_PRESENT(BIC_Pkg_J); 3846 BIC_PRESENT(BIC_RAM_J); 3847 } else { 3848 BIC_PRESENT(BIC_PkgWatt); 3849 BIC_PRESENT(BIC_RAMWatt); 3850 } 3851 break; 3852 case INTEL_FAM6_SANDYBRIDGE_X: 3853 case INTEL_FAM6_IVYBRIDGE_X: 3854 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; 3855 BIC_PRESENT(BIC_PKG__); 3856 BIC_PRESENT(BIC_RAM__); 3857 if (rapl_joules) { 3858 BIC_PRESENT(BIC_Pkg_J); 3859 BIC_PRESENT(BIC_Cor_J); 3860 BIC_PRESENT(BIC_RAM_J); 3861 } else { 3862 BIC_PRESENT(BIC_PkgWatt); 3863 BIC_PRESENT(BIC_CorWatt); 3864 BIC_PRESENT(BIC_RAMWatt); 3865 } 3866 break; 3867 case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ 3868 case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ 3869 do_rapl = RAPL_PKG | RAPL_CORES; 3870 if (rapl_joules) { 3871 BIC_PRESENT(BIC_Pkg_J); 3872 BIC_PRESENT(BIC_Cor_J); 3873 } else { 3874 BIC_PRESENT(BIC_PkgWatt); 3875 BIC_PRESENT(BIC_CorWatt); 3876 } 3877 break; 3878 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ 3879 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; 3880 BIC_PRESENT(BIC_PKG__); 3881 BIC_PRESENT(BIC_RAM__); 3882 if (rapl_joules) { 3883 BIC_PRESENT(BIC_Pkg_J); 3884 BIC_PRESENT(BIC_Cor_J); 3885 BIC_PRESENT(BIC_RAM_J); 3886 } else { 3887 BIC_PRESENT(BIC_PkgWatt); 3888 BIC_PRESENT(BIC_CorWatt); 3889 BIC_PRESENT(BIC_RAMWatt); 3890 } 3891 break; 3892 default: 3893 return; 3894 } 3895 3896 /* units on package 0, verify later other packages match */ 3897 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) 3898 return; 3899 3900 rapl_power_units = 1.0 / (1 << (msr & 0xF)); 3901 if (model == INTEL_FAM6_ATOM_SILVERMONT1) 3902 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; 3903 else 3904 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); 3905 3906 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units); 3907 3908 time_unit = msr >> 16 & 0xF; 3909 if (time_unit == 0) 3910 time_unit = 0xA; 3911 3912 rapl_time_units = 1.0 / (1 << (time_unit)); 3913 3914 tdp = get_tdp(model); 3915 3916 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 3917 if (!quiet) 3918 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); 3919 3920 return; 3921 } 3922 3923 void perf_limit_reasons_probe(unsigned int family, unsigned int model) 3924 { 3925 if (!genuine_intel) 3926 return; 3927 3928 if (family != 6) 3929 return; 3930 3931 switch (model) { 3932 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3933 case INTEL_FAM6_HASWELL_ULT: /* HSW */ 3934 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3935 do_gfx_perf_limit_reasons = 1; 3936 case INTEL_FAM6_HASWELL_X: /* HSX */ 3937 do_core_perf_limit_reasons = 1; 3938 do_ring_perf_limit_reasons = 1; 3939 default: 3940 return; 3941 } 3942 } 3943 3944 void automatic_cstate_conversion_probe(unsigned int family, unsigned int model) 3945 { 3946 if (is_skx(family, model) || is_bdx(family, model)) 3947 has_automatic_cstate_conversion = 1; 3948 } 3949 3950 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3951 { 3952 unsigned long long msr; 3953 unsigned int dts, dts2; 3954 int cpu; 3955 3956 if (!(do_dts || do_ptm)) 3957 return 0; 3958 3959 cpu = t->cpu_id; 3960 3961 /* DTS is per-core, no need to print for each thread */ 3962 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 3963 return 0; 3964 3965 if (cpu_migrate(cpu)) { 3966 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 3967 return -1; 3968 } 3969 3970 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { 3971 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 3972 return 0; 3973 3974 dts = (msr >> 16) & 0x7F; 3975 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", 3976 cpu, msr, tcc_activation_temp - dts); 3977 3978 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) 3979 return 0; 3980 3981 dts = (msr >> 16) & 0x7F; 3982 dts2 = (msr >> 8) & 0x7F; 3983 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3984 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3985 } 3986 3987 3988 if (do_dts && debug) { 3989 unsigned int resolution; 3990 3991 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 3992 return 0; 3993 3994 dts = (msr >> 16) & 0x7F; 3995 resolution = (msr >> 27) & 0xF; 3996 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", 3997 cpu, msr, tcc_activation_temp - dts, resolution); 3998 3999 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) 4000 return 0; 4001 4002 dts = (msr >> 16) & 0x7F; 4003 dts2 = (msr >> 8) & 0x7F; 4004 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 4005 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 4006 } 4007 4008 return 0; 4009 } 4010 4011 void print_power_limit_msr(int cpu, unsigned long long msr, char *label) 4012 { 4013 fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n", 4014 cpu, label, 4015 ((msr >> 15) & 1) ? "EN" : "DIS", 4016 ((msr >> 0) & 0x7FFF) * rapl_power_units, 4017 (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, 4018 (((msr >> 16) & 1) ? "EN" : "DIS")); 4019 4020 return; 4021 } 4022 4023 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) 4024 { 4025 unsigned long long msr; 4026 int cpu; 4027 4028 if (!do_rapl) 4029 return 0; 4030 4031 /* RAPL counters are per package, so print only for 1st thread/package */ 4032 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 4033 return 0; 4034 4035 cpu = t->cpu_id; 4036 if (cpu_migrate(cpu)) { 4037 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 4038 return -1; 4039 } 4040 4041 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) 4042 return -1; 4043 4044 fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, 4045 rapl_power_units, rapl_energy_units, rapl_time_units); 4046 4047 if (do_rapl & RAPL_PKG_POWER_INFO) { 4048 4049 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) 4050 return -5; 4051 4052 4053 fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", 4054 cpu, msr, 4055 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, 4056 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, 4057 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 4058 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 4059 4060 } 4061 if (do_rapl & RAPL_PKG) { 4062 4063 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) 4064 return -9; 4065 4066 fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", 4067 cpu, msr, (msr >> 63) & 1 ? "" : "UN"); 4068 4069 print_power_limit_msr(cpu, msr, "PKG Limit #1"); 4070 fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", 4071 cpu, 4072 ((msr >> 47) & 1) ? "EN" : "DIS", 4073 ((msr >> 32) & 0x7FFF) * rapl_power_units, 4074 (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, 4075 ((msr >> 48) & 1) ? "EN" : "DIS"); 4076 } 4077 4078 if (do_rapl & RAPL_DRAM_POWER_INFO) { 4079 if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) 4080 return -6; 4081 4082 fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", 4083 cpu, msr, 4084 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, 4085 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, 4086 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, 4087 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); 4088 } 4089 if (do_rapl & RAPL_DRAM) { 4090 if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) 4091 return -9; 4092 fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", 4093 cpu, msr, (msr >> 31) & 1 ? "" : "UN"); 4094 4095 print_power_limit_msr(cpu, msr, "DRAM Limit"); 4096 } 4097 if (do_rapl & RAPL_CORE_POLICY) { 4098 if (get_msr(cpu, MSR_PP0_POLICY, &msr)) 4099 return -7; 4100 4101 fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); 4102 } 4103 if (do_rapl & RAPL_CORES_POWER_LIMIT) { 4104 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) 4105 return -9; 4106 fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", 4107 cpu, msr, (msr >> 31) & 1 ? "" : "UN"); 4108 print_power_limit_msr(cpu, msr, "Cores Limit"); 4109 } 4110 if (do_rapl & RAPL_GFX) { 4111 if (get_msr(cpu, MSR_PP1_POLICY, &msr)) 4112 return -8; 4113 4114 fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); 4115 4116 if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) 4117 return -9; 4118 fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", 4119 cpu, msr, (msr >> 31) & 1 ? "" : "UN"); 4120 print_power_limit_msr(cpu, msr, "GFX Limit"); 4121 } 4122 return 0; 4123 } 4124 4125 /* 4126 * SNB adds support for additional MSRs: 4127 * 4128 * MSR_PKG_C7_RESIDENCY 0x000003fa 4129 * MSR_CORE_C7_RESIDENCY 0x000003fe 4130 * MSR_PKG_C2_RESIDENCY 0x0000060d 4131 */ 4132 4133 int has_snb_msrs(unsigned int family, unsigned int model) 4134 { 4135 if (!genuine_intel) 4136 return 0; 4137 4138 switch (model) { 4139 case INTEL_FAM6_SANDYBRIDGE: 4140 case INTEL_FAM6_SANDYBRIDGE_X: 4141 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 4142 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 4143 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4144 case INTEL_FAM6_HASWELL_X: /* HSW */ 4145 case INTEL_FAM6_HASWELL_ULT: /* HSW */ 4146 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4147 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4148 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 4149 case INTEL_FAM6_BROADWELL_X: /* BDX */ 4150 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 4151 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4152 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 4153 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 4154 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 4155 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4156 case INTEL_FAM6_SKYLAKE_X: /* SKX */ 4157 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ 4158 case INTEL_FAM6_ATOM_GEMINI_LAKE: 4159 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ 4160 return 1; 4161 } 4162 return 0; 4163 } 4164 4165 /* 4166 * HSW adds support for additional MSRs: 4167 * 4168 * MSR_PKG_C8_RESIDENCY 0x00000630 4169 * MSR_PKG_C9_RESIDENCY 0x00000631 4170 * MSR_PKG_C10_RESIDENCY 0x00000632 4171 * 4172 * MSR_PKGC8_IRTL 0x00000633 4173 * MSR_PKGC9_IRTL 0x00000634 4174 * MSR_PKGC10_IRTL 0x00000635 4175 * 4176 */ 4177 int has_hsw_msrs(unsigned int family, unsigned int model) 4178 { 4179 if (!genuine_intel) 4180 return 0; 4181 4182 switch (model) { 4183 case INTEL_FAM6_HASWELL_ULT: /* HSW */ 4184 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4185 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4186 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 4187 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 4188 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 4189 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4190 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ 4191 case INTEL_FAM6_ATOM_GEMINI_LAKE: 4192 return 1; 4193 } 4194 return 0; 4195 } 4196 4197 /* 4198 * SKL adds support for additional MSRS: 4199 * 4200 * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 4201 * MSR_PKG_ANY_CORE_C0_RES 0x00000659 4202 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A 4203 * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B 4204 */ 4205 int has_skl_msrs(unsigned int family, unsigned int model) 4206 { 4207 if (!genuine_intel) 4208 return 0; 4209 4210 switch (model) { 4211 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4212 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 4213 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 4214 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 4215 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4216 return 1; 4217 } 4218 return 0; 4219 } 4220 4221 int is_slm(unsigned int family, unsigned int model) 4222 { 4223 if (!genuine_intel) 4224 return 0; 4225 switch (model) { 4226 case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ 4227 case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ 4228 return 1; 4229 } 4230 return 0; 4231 } 4232 4233 int is_knl(unsigned int family, unsigned int model) 4234 { 4235 if (!genuine_intel) 4236 return 0; 4237 switch (model) { 4238 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ 4239 case INTEL_FAM6_XEON_PHI_KNM: 4240 return 1; 4241 } 4242 return 0; 4243 } 4244 4245 int is_cnl(unsigned int family, unsigned int model) 4246 { 4247 if (!genuine_intel) 4248 return 0; 4249 4250 switch (model) { 4251 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4252 return 1; 4253 } 4254 4255 return 0; 4256 } 4257 4258 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) 4259 { 4260 if (is_knl(family, model)) 4261 return 1024; 4262 return 1; 4263 } 4264 4265 #define SLM_BCLK_FREQS 5 4266 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 4267 4268 double slm_bclk(void) 4269 { 4270 unsigned long long msr = 3; 4271 unsigned int i; 4272 double freq; 4273 4274 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) 4275 fprintf(outf, "SLM BCLK: unknown\n"); 4276 4277 i = msr & 0xf; 4278 if (i >= SLM_BCLK_FREQS) { 4279 fprintf(outf, "SLM BCLK[%d] invalid\n", i); 4280 i = 3; 4281 } 4282 freq = slm_freq_table[i]; 4283 4284 if (!quiet) 4285 fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); 4286 4287 return freq; 4288 } 4289 4290 double discover_bclk(unsigned int family, unsigned int model) 4291 { 4292 if (has_snb_msrs(family, model) || is_knl(family, model)) 4293 return 100.00; 4294 else if (is_slm(family, model)) 4295 return slm_bclk(); 4296 else 4297 return 133.33; 4298 } 4299 4300 /* 4301 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where 4302 * the Thermal Control Circuit (TCC) activates. 4303 * This is usually equal to tjMax. 4304 * 4305 * Older processors do not have this MSR, so there we guess, 4306 * but also allow cmdline over-ride with -T. 4307 * 4308 * Several MSR temperature values are in units of degrees-C 4309 * below this value, including the Digital Thermal Sensor (DTS), 4310 * Package Thermal Management Sensor (PTM), and thermal event thresholds. 4311 */ 4312 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) 4313 { 4314 unsigned long long msr; 4315 unsigned int target_c_local; 4316 int cpu; 4317 4318 /* tcc_activation_temp is used only for dts or ptm */ 4319 if (!(do_dts || do_ptm)) 4320 return 0; 4321 4322 /* this is a per-package concept */ 4323 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 4324 return 0; 4325 4326 cpu = t->cpu_id; 4327 if (cpu_migrate(cpu)) { 4328 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 4329 return -1; 4330 } 4331 4332 if (tcc_activation_temp_override != 0) { 4333 tcc_activation_temp = tcc_activation_temp_override; 4334 fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n", 4335 cpu, tcc_activation_temp); 4336 return 0; 4337 } 4338 4339 /* Temperature Target MSR is Nehalem and newer only */ 4340 if (!do_nhm_platform_info) 4341 goto guess; 4342 4343 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) 4344 goto guess; 4345 4346 target_c_local = (msr >> 16) & 0xFF; 4347 4348 if (!quiet) 4349 fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", 4350 cpu, msr, target_c_local); 4351 4352 if (!target_c_local) 4353 goto guess; 4354 4355 tcc_activation_temp = target_c_local; 4356 4357 return 0; 4358 4359 guess: 4360 tcc_activation_temp = TJMAX_DEFAULT; 4361 fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", 4362 cpu, tcc_activation_temp); 4363 4364 return 0; 4365 } 4366 4367 void decode_feature_control_msr(void) 4368 { 4369 unsigned long long msr; 4370 4371 if (!get_msr(base_cpu, MSR_IA32_FEATURE_CONTROL, &msr)) 4372 fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n", 4373 base_cpu, msr, 4374 msr & FEATURE_CONTROL_LOCKED ? "" : "UN-", 4375 msr & (1 << 18) ? "SGX" : ""); 4376 } 4377 4378 void decode_misc_enable_msr(void) 4379 { 4380 unsigned long long msr; 4381 4382 if (!genuine_intel) 4383 return; 4384 4385 if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr)) 4386 fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n", 4387 base_cpu, msr, 4388 msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-", 4389 msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-", 4390 msr & MSR_IA32_MISC_ENABLE_MWAIT ? "" : "No-", 4391 msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "", 4392 msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : ""); 4393 } 4394 4395 void decode_misc_feature_control(void) 4396 { 4397 unsigned long long msr; 4398 4399 if (!has_misc_feature_control) 4400 return; 4401 4402 if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr)) 4403 fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n", 4404 base_cpu, msr, 4405 msr & (0 << 0) ? "No-" : "", 4406 msr & (1 << 0) ? "No-" : "", 4407 msr & (2 << 0) ? "No-" : "", 4408 msr & (3 << 0) ? "No-" : ""); 4409 } 4410 /* 4411 * Decode MSR_MISC_PWR_MGMT 4412 * 4413 * Decode the bits according to the Nehalem documentation 4414 * bit[0] seems to continue to have same meaning going forward 4415 * bit[1] less so... 4416 */ 4417 void decode_misc_pwr_mgmt_msr(void) 4418 { 4419 unsigned long long msr; 4420 4421 if (!do_nhm_platform_info) 4422 return; 4423 4424 if (no_MSR_MISC_PWR_MGMT) 4425 return; 4426 4427 if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) 4428 fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n", 4429 base_cpu, msr, 4430 msr & (1 << 0) ? "DIS" : "EN", 4431 msr & (1 << 1) ? "EN" : "DIS", 4432 msr & (1 << 8) ? "EN" : "DIS"); 4433 } 4434 /* 4435 * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG 4436 * 4437 * This MSRs are present on Silvermont processors, 4438 * Intel Atom processor E3000 series (Baytrail), and friends. 4439 */ 4440 void decode_c6_demotion_policy_msr(void) 4441 { 4442 unsigned long long msr; 4443 4444 if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr)) 4445 fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n", 4446 base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS"); 4447 4448 if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr)) 4449 fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n", 4450 base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS"); 4451 } 4452 4453 void process_cpuid() 4454 { 4455 unsigned int eax, ebx, ecx, edx, max_level, max_extended_level; 4456 unsigned int fms, family, model, stepping; 4457 unsigned int has_turbo; 4458 4459 eax = ebx = ecx = edx = 0; 4460 4461 __cpuid(0, max_level, ebx, ecx, edx); 4462 4463 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 4464 genuine_intel = 1; 4465 4466 if (!quiet) 4467 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 4468 (char *)&ebx, (char *)&edx, (char *)&ecx); 4469 4470 __cpuid(1, fms, ebx, ecx, edx); 4471 family = (fms >> 8) & 0xf; 4472 model = (fms >> 4) & 0xf; 4473 stepping = fms & 0xf; 4474 if (family == 6 || family == 0xf) 4475 model += ((fms >> 16) & 0xf) << 4; 4476 4477 if (!quiet) { 4478 fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", 4479 max_level, family, model, stepping, family, model, stepping); 4480 fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n", 4481 ecx & (1 << 0) ? "SSE3" : "-", 4482 ecx & (1 << 3) ? "MONITOR" : "-", 4483 ecx & (1 << 6) ? "SMX" : "-", 4484 ecx & (1 << 7) ? "EIST" : "-", 4485 ecx & (1 << 8) ? "TM2" : "-", 4486 edx & (1 << 4) ? "TSC" : "-", 4487 edx & (1 << 5) ? "MSR" : "-", 4488 edx & (1 << 22) ? "ACPI-TM" : "-", 4489 edx & (1 << 28) ? "HT" : "-", 4490 edx & (1 << 29) ? "TM" : "-"); 4491 } 4492 4493 if (!(edx & (1 << 5))) 4494 errx(1, "CPUID: no MSR"); 4495 4496 /* 4497 * check max extended function levels of CPUID. 4498 * This is needed to check for invariant TSC. 4499 * This check is valid for both Intel and AMD. 4500 */ 4501 ebx = ecx = edx = 0; 4502 __cpuid(0x80000000, max_extended_level, ebx, ecx, edx); 4503 4504 if (max_extended_level >= 0x80000007) { 4505 4506 /* 4507 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 4508 * this check is valid for both Intel and AMD 4509 */ 4510 __cpuid(0x80000007, eax, ebx, ecx, edx); 4511 has_invariant_tsc = edx & (1 << 8); 4512 } 4513 4514 /* 4515 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 4516 * this check is valid for both Intel and AMD 4517 */ 4518 4519 __cpuid(0x6, eax, ebx, ecx, edx); 4520 has_aperf = ecx & (1 << 0); 4521 if (has_aperf) { 4522 BIC_PRESENT(BIC_Avg_MHz); 4523 BIC_PRESENT(BIC_Busy); 4524 BIC_PRESENT(BIC_Bzy_MHz); 4525 } 4526 do_dts = eax & (1 << 0); 4527 if (do_dts) 4528 BIC_PRESENT(BIC_CoreTmp); 4529 has_turbo = eax & (1 << 1); 4530 do_ptm = eax & (1 << 6); 4531 if (do_ptm) 4532 BIC_PRESENT(BIC_PkgTmp); 4533 has_hwp = eax & (1 << 7); 4534 has_hwp_notify = eax & (1 << 8); 4535 has_hwp_activity_window = eax & (1 << 9); 4536 has_hwp_epp = eax & (1 << 10); 4537 has_hwp_pkg = eax & (1 << 11); 4538 has_epb = ecx & (1 << 3); 4539 4540 if (!quiet) 4541 fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, " 4542 "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", 4543 has_aperf ? "" : "No-", 4544 has_turbo ? "" : "No-", 4545 do_dts ? "" : "No-", 4546 do_ptm ? "" : "No-", 4547 has_hwp ? "" : "No-", 4548 has_hwp_notify ? "" : "No-", 4549 has_hwp_activity_window ? "" : "No-", 4550 has_hwp_epp ? "" : "No-", 4551 has_hwp_pkg ? "" : "No-", 4552 has_epb ? "" : "No-"); 4553 4554 if (!quiet) 4555 decode_misc_enable_msr(); 4556 4557 4558 if (max_level >= 0x7 && !quiet) { 4559 int has_sgx; 4560 4561 ecx = 0; 4562 4563 __cpuid_count(0x7, 0, eax, ebx, ecx, edx); 4564 4565 has_sgx = ebx & (1 << 2); 4566 fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-"); 4567 4568 if (has_sgx) 4569 decode_feature_control_msr(); 4570 } 4571 4572 if (max_level >= 0x15) { 4573 unsigned int eax_crystal; 4574 unsigned int ebx_tsc; 4575 4576 /* 4577 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz 4578 */ 4579 eax_crystal = ebx_tsc = crystal_hz = edx = 0; 4580 __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx); 4581 4582 if (ebx_tsc != 0) { 4583 4584 if (!quiet && (ebx != 0)) 4585 fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", 4586 eax_crystal, ebx_tsc, crystal_hz); 4587 4588 if (crystal_hz == 0) 4589 switch(model) { 4590 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4591 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 4592 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 4593 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 4594 crystal_hz = 24000000; /* 24.0 MHz */ 4595 break; 4596 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ 4597 crystal_hz = 25000000; /* 25.0 MHz */ 4598 break; 4599 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ 4600 case INTEL_FAM6_ATOM_GEMINI_LAKE: 4601 crystal_hz = 19200000; /* 19.2 MHz */ 4602 break; 4603 default: 4604 crystal_hz = 0; 4605 } 4606 4607 if (crystal_hz) { 4608 tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; 4609 if (!quiet) 4610 fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", 4611 tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); 4612 } 4613 } 4614 } 4615 if (max_level >= 0x16) { 4616 unsigned int base_mhz, max_mhz, bus_mhz, edx; 4617 4618 /* 4619 * CPUID 16H Base MHz, Max MHz, Bus MHz 4620 */ 4621 base_mhz = max_mhz = bus_mhz = edx = 0; 4622 4623 __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx); 4624 if (!quiet) 4625 fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", 4626 base_mhz, max_mhz, bus_mhz); 4627 } 4628 4629 if (has_aperf) 4630 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); 4631 4632 BIC_PRESENT(BIC_IRQ); 4633 BIC_PRESENT(BIC_TSC_MHz); 4634 4635 if (probe_nhm_msrs(family, model)) { 4636 do_nhm_platform_info = 1; 4637 BIC_PRESENT(BIC_CPU_c1); 4638 BIC_PRESENT(BIC_CPU_c3); 4639 BIC_PRESENT(BIC_CPU_c6); 4640 BIC_PRESENT(BIC_SMI); 4641 } 4642 do_snb_cstates = has_snb_msrs(family, model); 4643 4644 if (do_snb_cstates) 4645 BIC_PRESENT(BIC_CPU_c7); 4646 4647 do_irtl_snb = has_snb_msrs(family, model); 4648 if (do_snb_cstates && (pkg_cstate_limit >= PCL__2)) 4649 BIC_PRESENT(BIC_Pkgpc2); 4650 if (pkg_cstate_limit >= PCL__3) 4651 BIC_PRESENT(BIC_Pkgpc3); 4652 if (pkg_cstate_limit >= PCL__6) 4653 BIC_PRESENT(BIC_Pkgpc6); 4654 if (do_snb_cstates && (pkg_cstate_limit >= PCL__7)) 4655 BIC_PRESENT(BIC_Pkgpc7); 4656 if (has_slv_msrs(family, model)) { 4657 BIC_NOT_PRESENT(BIC_Pkgpc2); 4658 BIC_NOT_PRESENT(BIC_Pkgpc3); 4659 BIC_PRESENT(BIC_Pkgpc6); 4660 BIC_NOT_PRESENT(BIC_Pkgpc7); 4661 BIC_PRESENT(BIC_Mod_c6); 4662 use_c1_residency_msr = 1; 4663 } 4664 if (is_dnv(family, model)) { 4665 BIC_PRESENT(BIC_CPU_c1); 4666 BIC_NOT_PRESENT(BIC_CPU_c3); 4667 BIC_NOT_PRESENT(BIC_Pkgpc3); 4668 BIC_NOT_PRESENT(BIC_CPU_c7); 4669 BIC_NOT_PRESENT(BIC_Pkgpc7); 4670 use_c1_residency_msr = 1; 4671 } 4672 if (is_skx(family, model)) { 4673 BIC_NOT_PRESENT(BIC_CPU_c3); 4674 BIC_NOT_PRESENT(BIC_Pkgpc3); 4675 BIC_NOT_PRESENT(BIC_CPU_c7); 4676 BIC_NOT_PRESENT(BIC_Pkgpc7); 4677 } 4678 if (is_bdx(family, model)) { 4679 BIC_NOT_PRESENT(BIC_CPU_c7); 4680 BIC_NOT_PRESENT(BIC_Pkgpc7); 4681 } 4682 if (has_hsw_msrs(family, model)) { 4683 BIC_PRESENT(BIC_Pkgpc8); 4684 BIC_PRESENT(BIC_Pkgpc9); 4685 BIC_PRESENT(BIC_Pkgpc10); 4686 } 4687 do_irtl_hsw = has_hsw_msrs(family, model); 4688 if (has_skl_msrs(family, model)) { 4689 BIC_PRESENT(BIC_Totl_c0); 4690 BIC_PRESENT(BIC_Any_c0); 4691 BIC_PRESENT(BIC_GFX_c0); 4692 BIC_PRESENT(BIC_CPUGFX); 4693 } 4694 do_slm_cstates = is_slm(family, model); 4695 do_knl_cstates = is_knl(family, model); 4696 do_cnl_cstates = is_cnl(family, model); 4697 4698 if (!quiet) 4699 decode_misc_pwr_mgmt_msr(); 4700 4701 if (!quiet && has_slv_msrs(family, model)) 4702 decode_c6_demotion_policy_msr(); 4703 4704 rapl_probe(family, model); 4705 perf_limit_reasons_probe(family, model); 4706 automatic_cstate_conversion_probe(family, model); 4707 4708 if (!quiet) 4709 dump_cstate_pstate_config_info(family, model); 4710 4711 if (!quiet) 4712 dump_sysfs_cstate_config(); 4713 if (!quiet) 4714 dump_sysfs_pstate_config(); 4715 4716 if (has_skl_msrs(family, model)) 4717 calculate_tsc_tweak(); 4718 4719 if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) 4720 BIC_PRESENT(BIC_GFX_rc6); 4721 4722 if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) 4723 BIC_PRESENT(BIC_GFXMHz); 4724 4725 if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) 4726 BIC_PRESENT(BIC_CPU_LPI); 4727 else 4728 BIC_NOT_PRESENT(BIC_CPU_LPI); 4729 4730 if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK)) 4731 BIC_PRESENT(BIC_SYS_LPI); 4732 else 4733 BIC_NOT_PRESENT(BIC_SYS_LPI); 4734 4735 if (!quiet) 4736 decode_misc_feature_control(); 4737 4738 return; 4739 } 4740 4741 /* 4742 * in /dev/cpu/ return success for names that are numbers 4743 * ie. filter out ".", "..", "microcode". 4744 */ 4745 int dir_filter(const struct dirent *dirp) 4746 { 4747 if (isdigit(dirp->d_name[0])) 4748 return 1; 4749 else 4750 return 0; 4751 } 4752 4753 int open_dev_cpu_msr(int dummy1) 4754 { 4755 return 0; 4756 } 4757 4758 void topology_probe() 4759 { 4760 int i; 4761 int max_core_id = 0; 4762 int max_package_id = 0; 4763 int max_siblings = 0; 4764 4765 /* Initialize num_cpus, max_cpu_num */ 4766 set_max_cpu_num(); 4767 topo.num_cpus = 0; 4768 for_all_proc_cpus(count_cpus); 4769 if (!summary_only && topo.num_cpus > 1) 4770 BIC_PRESENT(BIC_CPU); 4771 4772 if (debug > 1) 4773 fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); 4774 4775 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); 4776 if (cpus == NULL) 4777 err(1, "calloc cpus"); 4778 4779 /* 4780 * Allocate and initialize cpu_present_set 4781 */ 4782 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); 4783 if (cpu_present_set == NULL) 4784 err(3, "CPU_ALLOC"); 4785 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 4786 CPU_ZERO_S(cpu_present_setsize, cpu_present_set); 4787 for_all_proc_cpus(mark_cpu_present); 4788 4789 /* 4790 * Validate that all cpus in cpu_subset are also in cpu_present_set 4791 */ 4792 for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) { 4793 if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) 4794 if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) 4795 err(1, "cpu%d not present", i); 4796 } 4797 4798 /* 4799 * Allocate and initialize cpu_affinity_set 4800 */ 4801 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); 4802 if (cpu_affinity_set == NULL) 4803 err(3, "CPU_ALLOC"); 4804 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); 4805 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 4806 4807 for_all_proc_cpus(init_thread_id); 4808 4809 /* 4810 * For online cpus 4811 * find max_core_id, max_package_id 4812 */ 4813 for (i = 0; i <= topo.max_cpu_num; ++i) { 4814 int siblings; 4815 4816 if (cpu_is_not_present(i)) { 4817 if (debug > 1) 4818 fprintf(outf, "cpu%d NOT PRESENT\n", i); 4819 continue; 4820 } 4821 4822 cpus[i].logical_cpu_id = i; 4823 4824 /* get package information */ 4825 cpus[i].physical_package_id = get_physical_package_id(i); 4826 if (cpus[i].physical_package_id > max_package_id) 4827 max_package_id = cpus[i].physical_package_id; 4828 4829 /* get numa node information */ 4830 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); 4831 if (cpus[i].physical_node_id > topo.max_node_num) 4832 topo.max_node_num = cpus[i].physical_node_id; 4833 4834 /* get core information */ 4835 cpus[i].physical_core_id = get_core_id(i); 4836 if (cpus[i].physical_core_id > max_core_id) 4837 max_core_id = cpus[i].physical_core_id; 4838 4839 /* get thread information */ 4840 siblings = get_thread_siblings(&cpus[i]); 4841 if (siblings > max_siblings) 4842 max_siblings = siblings; 4843 if (cpus[i].thread_id != -1) 4844 topo.num_cores++; 4845 4846 if (debug > 1) 4847 fprintf(outf, 4848 "cpu %d pkg %d node %d core %d thread %d\n", 4849 i, cpus[i].physical_package_id, 4850 cpus[i].physical_node_id, 4851 cpus[i].physical_core_id, 4852 cpus[i].thread_id); 4853 } 4854 4855 topo.cores_per_node = max_core_id + 1; 4856 if (debug > 1) 4857 fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", 4858 max_core_id, topo.cores_per_node); 4859 if (!summary_only && topo.cores_per_node > 1) 4860 BIC_PRESENT(BIC_Core); 4861 4862 topo.num_packages = max_package_id + 1; 4863 if (debug > 1) 4864 fprintf(outf, "max_package_id %d, sizing for %d packages\n", 4865 max_package_id, topo.num_packages); 4866 if (!summary_only && topo.num_packages > 1) 4867 BIC_PRESENT(BIC_Package); 4868 4869 set_node_data(); 4870 if (debug > 1) 4871 fprintf(outf, "nodes_per_pkg %d\n", topo.nodes_per_pkg); 4872 if (!summary_only && topo.nodes_per_pkg > 1) 4873 BIC_PRESENT(BIC_Node); 4874 4875 topo.threads_per_core = max_siblings; 4876 if (debug > 1) 4877 fprintf(outf, "max_siblings %d\n", max_siblings); 4878 } 4879 4880 void 4881 allocate_counters(struct thread_data **t, struct core_data **c, 4882 struct pkg_data **p) 4883 { 4884 int i; 4885 int num_cores = topo.cores_per_node * topo.nodes_per_pkg * 4886 topo.num_packages; 4887 int num_threads = topo.threads_per_core * num_cores; 4888 4889 *t = calloc(num_threads, sizeof(struct thread_data)); 4890 if (*t == NULL) 4891 goto error; 4892 4893 for (i = 0; i < num_threads; i++) 4894 (*t)[i].cpu_id = -1; 4895 4896 *c = calloc(num_cores, sizeof(struct core_data)); 4897 if (*c == NULL) 4898 goto error; 4899 4900 for (i = 0; i < num_cores; i++) 4901 (*c)[i].core_id = -1; 4902 4903 *p = calloc(topo.num_packages, sizeof(struct pkg_data)); 4904 if (*p == NULL) 4905 goto error; 4906 4907 for (i = 0; i < topo.num_packages; i++) 4908 (*p)[i].package_id = i; 4909 4910 return; 4911 error: 4912 err(1, "calloc counters"); 4913 } 4914 /* 4915 * init_counter() 4916 * 4917 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE 4918 */ 4919 void init_counter(struct thread_data *thread_base, struct core_data *core_base, 4920 struct pkg_data *pkg_base, int cpu_id) 4921 { 4922 int pkg_id = cpus[cpu_id].physical_package_id; 4923 int node_id = cpus[cpu_id].logical_node_id; 4924 int core_id = cpus[cpu_id].physical_core_id; 4925 int thread_id = cpus[cpu_id].thread_id; 4926 struct thread_data *t; 4927 struct core_data *c; 4928 struct pkg_data *p; 4929 4930 4931 /* Workaround for systems where physical_node_id==-1 4932 * and logical_node_id==(-1 - topo.num_cpus) 4933 */ 4934 if (node_id < 0) 4935 node_id = 0; 4936 4937 t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); 4938 c = GET_CORE(core_base, core_id, node_id, pkg_id); 4939 p = GET_PKG(pkg_base, pkg_id); 4940 4941 t->cpu_id = cpu_id; 4942 if (thread_id == 0) { 4943 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; 4944 if (cpu_is_first_core_in_package(cpu_id)) 4945 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; 4946 } 4947 4948 c->core_id = core_id; 4949 p->package_id = pkg_id; 4950 } 4951 4952 4953 int initialize_counters(int cpu_id) 4954 { 4955 init_counter(EVEN_COUNTERS, cpu_id); 4956 init_counter(ODD_COUNTERS, cpu_id); 4957 return 0; 4958 } 4959 4960 void allocate_output_buffer() 4961 { 4962 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 4963 outp = output_buffer; 4964 if (outp == NULL) 4965 err(-1, "calloc output buffer"); 4966 } 4967 void allocate_fd_percpu(void) 4968 { 4969 fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); 4970 if (fd_percpu == NULL) 4971 err(-1, "calloc fd_percpu"); 4972 } 4973 void allocate_irq_buffers(void) 4974 { 4975 irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int)); 4976 if (irq_column_2_cpu == NULL) 4977 err(-1, "calloc %d", topo.num_cpus); 4978 4979 irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int)); 4980 if (irqs_per_cpu == NULL) 4981 err(-1, "calloc %d", topo.max_cpu_num + 1); 4982 } 4983 void setup_all_buffers(void) 4984 { 4985 topology_probe(); 4986 allocate_irq_buffers(); 4987 allocate_fd_percpu(); 4988 allocate_counters(&thread_even, &core_even, &package_even); 4989 allocate_counters(&thread_odd, &core_odd, &package_odd); 4990 allocate_output_buffer(); 4991 for_all_proc_cpus(initialize_counters); 4992 } 4993 4994 void set_base_cpu(void) 4995 { 4996 base_cpu = sched_getcpu(); 4997 if (base_cpu < 0) 4998 err(-ENODEV, "No valid cpus found"); 4999 5000 if (debug > 1) 5001 fprintf(outf, "base_cpu = %d\n", base_cpu); 5002 } 5003 5004 void turbostat_init() 5005 { 5006 setup_all_buffers(); 5007 set_base_cpu(); 5008 check_dev_msr(); 5009 check_permissions(); 5010 process_cpuid(); 5011 5012 5013 if (!quiet) 5014 for_all_cpus(print_hwp, ODD_COUNTERS); 5015 5016 if (!quiet) 5017 for_all_cpus(print_epb, ODD_COUNTERS); 5018 5019 if (!quiet) 5020 for_all_cpus(print_perf_limit, ODD_COUNTERS); 5021 5022 if (!quiet) 5023 for_all_cpus(print_rapl, ODD_COUNTERS); 5024 5025 for_all_cpus(set_temperature_target, ODD_COUNTERS); 5026 5027 if (!quiet) 5028 for_all_cpus(print_thermal, ODD_COUNTERS); 5029 5030 if (!quiet && do_irtl_snb) 5031 print_irtl(); 5032 } 5033 5034 int fork_it(char **argv) 5035 { 5036 pid_t child_pid; 5037 int status; 5038 5039 snapshot_proc_sysfs_files(); 5040 status = for_all_cpus(get_counters, EVEN_COUNTERS); 5041 first_counter_read = 0; 5042 if (status) 5043 exit(status); 5044 /* clear affinity side-effect of get_counters() */ 5045 sched_setaffinity(0, cpu_present_setsize, cpu_present_set); 5046 gettimeofday(&tv_even, (struct timezone *)NULL); 5047 5048 child_pid = fork(); 5049 if (!child_pid) { 5050 /* child */ 5051 execvp(argv[0], argv); 5052 err(errno, "exec %s", argv[0]); 5053 } else { 5054 5055 /* parent */ 5056 if (child_pid == -1) 5057 err(1, "fork"); 5058 5059 signal(SIGINT, SIG_IGN); 5060 signal(SIGQUIT, SIG_IGN); 5061 if (waitpid(child_pid, &status, 0) == -1) 5062 err(status, "waitpid"); 5063 } 5064 /* 5065 * n.b. fork_it() does not check for errors from for_all_cpus() 5066 * because re-starting is problematic when forking 5067 */ 5068 snapshot_proc_sysfs_files(); 5069 for_all_cpus(get_counters, ODD_COUNTERS); 5070 gettimeofday(&tv_odd, (struct timezone *)NULL); 5071 timersub(&tv_odd, &tv_even, &tv_delta); 5072 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) 5073 fprintf(outf, "%s: Counter reset detected\n", progname); 5074 else { 5075 compute_average(EVEN_COUNTERS); 5076 format_all_counters(EVEN_COUNTERS); 5077 } 5078 5079 fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); 5080 5081 flush_output_stderr(); 5082 5083 return status; 5084 } 5085 5086 int get_and_dump_counters(void) 5087 { 5088 int status; 5089 5090 snapshot_proc_sysfs_files(); 5091 status = for_all_cpus(get_counters, ODD_COUNTERS); 5092 if (status) 5093 return status; 5094 5095 status = for_all_cpus(dump_counters, ODD_COUNTERS); 5096 if (status) 5097 return status; 5098 5099 flush_output_stdout(); 5100 5101 return status; 5102 } 5103 5104 void print_version() { 5105 fprintf(outf, "turbostat version 18.06.20" 5106 " - Len Brown <lenb@kernel.org>\n"); 5107 } 5108 5109 int add_counter(unsigned int msr_num, char *path, char *name, 5110 unsigned int width, enum counter_scope scope, 5111 enum counter_type type, enum counter_format format, int flags) 5112 { 5113 struct msr_counter *msrp; 5114 5115 msrp = calloc(1, sizeof(struct msr_counter)); 5116 if (msrp == NULL) { 5117 perror("calloc"); 5118 exit(1); 5119 } 5120 5121 msrp->msr_num = msr_num; 5122 strncpy(msrp->name, name, NAME_BYTES); 5123 if (path) 5124 strncpy(msrp->path, path, PATH_BYTES); 5125 msrp->width = width; 5126 msrp->type = type; 5127 msrp->format = format; 5128 msrp->flags = flags; 5129 5130 switch (scope) { 5131 5132 case SCOPE_CPU: 5133 msrp->next = sys.tp; 5134 sys.tp = msrp; 5135 sys.added_thread_counters++; 5136 if (sys.added_thread_counters > MAX_ADDED_THREAD_COUNTERS) { 5137 fprintf(stderr, "exceeded max %d added thread counters\n", 5138 MAX_ADDED_COUNTERS); 5139 exit(-1); 5140 } 5141 break; 5142 5143 case SCOPE_CORE: 5144 msrp->next = sys.cp; 5145 sys.cp = msrp; 5146 sys.added_core_counters++; 5147 if (sys.added_core_counters > MAX_ADDED_COUNTERS) { 5148 fprintf(stderr, "exceeded max %d added core counters\n", 5149 MAX_ADDED_COUNTERS); 5150 exit(-1); 5151 } 5152 break; 5153 5154 case SCOPE_PACKAGE: 5155 msrp->next = sys.pp; 5156 sys.pp = msrp; 5157 sys.added_package_counters++; 5158 if (sys.added_package_counters > MAX_ADDED_COUNTERS) { 5159 fprintf(stderr, "exceeded max %d added package counters\n", 5160 MAX_ADDED_COUNTERS); 5161 exit(-1); 5162 } 5163 break; 5164 } 5165 5166 return 0; 5167 } 5168 5169 void parse_add_command(char *add_command) 5170 { 5171 int msr_num = 0; 5172 char *path = NULL; 5173 char name_buffer[NAME_BYTES] = ""; 5174 int width = 64; 5175 int fail = 0; 5176 enum counter_scope scope = SCOPE_CPU; 5177 enum counter_type type = COUNTER_CYCLES; 5178 enum counter_format format = FORMAT_DELTA; 5179 5180 while (add_command) { 5181 5182 if (sscanf(add_command, "msr0x%x", &msr_num) == 1) 5183 goto next; 5184 5185 if (sscanf(add_command, "msr%d", &msr_num) == 1) 5186 goto next; 5187 5188 if (*add_command == '/') { 5189 path = add_command; 5190 goto next; 5191 } 5192 5193 if (sscanf(add_command, "u%d", &width) == 1) { 5194 if ((width == 32) || (width == 64)) 5195 goto next; 5196 width = 64; 5197 } 5198 if (!strncmp(add_command, "cpu", strlen("cpu"))) { 5199 scope = SCOPE_CPU; 5200 goto next; 5201 } 5202 if (!strncmp(add_command, "core", strlen("core"))) { 5203 scope = SCOPE_CORE; 5204 goto next; 5205 } 5206 if (!strncmp(add_command, "package", strlen("package"))) { 5207 scope = SCOPE_PACKAGE; 5208 goto next; 5209 } 5210 if (!strncmp(add_command, "cycles", strlen("cycles"))) { 5211 type = COUNTER_CYCLES; 5212 goto next; 5213 } 5214 if (!strncmp(add_command, "seconds", strlen("seconds"))) { 5215 type = COUNTER_SECONDS; 5216 goto next; 5217 } 5218 if (!strncmp(add_command, "usec", strlen("usec"))) { 5219 type = COUNTER_USEC; 5220 goto next; 5221 } 5222 if (!strncmp(add_command, "raw", strlen("raw"))) { 5223 format = FORMAT_RAW; 5224 goto next; 5225 } 5226 if (!strncmp(add_command, "delta", strlen("delta"))) { 5227 format = FORMAT_DELTA; 5228 goto next; 5229 } 5230 if (!strncmp(add_command, "percent", strlen("percent"))) { 5231 format = FORMAT_PERCENT; 5232 goto next; 5233 } 5234 5235 if (sscanf(add_command, "%18s,%*s", name_buffer) == 1) { /* 18 < NAME_BYTES */ 5236 char *eos; 5237 5238 eos = strchr(name_buffer, ','); 5239 if (eos) 5240 *eos = '\0'; 5241 goto next; 5242 } 5243 5244 next: 5245 add_command = strchr(add_command, ','); 5246 if (add_command) { 5247 *add_command = '\0'; 5248 add_command++; 5249 } 5250 5251 } 5252 if ((msr_num == 0) && (path == NULL)) { 5253 fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n"); 5254 fail++; 5255 } 5256 5257 /* generate default column header */ 5258 if (*name_buffer == '\0') { 5259 if (width == 32) 5260 sprintf(name_buffer, "M0x%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); 5261 else 5262 sprintf(name_buffer, "M0X%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); 5263 } 5264 5265 if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0)) 5266 fail++; 5267 5268 if (fail) { 5269 help(); 5270 exit(1); 5271 } 5272 } 5273 5274 int is_deferred_skip(char *name) 5275 { 5276 int i; 5277 5278 for (i = 0; i < deferred_skip_index; ++i) 5279 if (!strcmp(name, deferred_skip_names[i])) 5280 return 1; 5281 return 0; 5282 } 5283 5284 void probe_sysfs(void) 5285 { 5286 char path[64]; 5287 char name_buf[16]; 5288 FILE *input; 5289 int state; 5290 char *sp; 5291 5292 if (!DO_BIC(BIC_sysfs)) 5293 return; 5294 5295 for (state = 10; state >= 0; --state) { 5296 5297 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", 5298 base_cpu, state); 5299 input = fopen(path, "r"); 5300 if (input == NULL) 5301 continue; 5302 fgets(name_buf, sizeof(name_buf), input); 5303 5304 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 5305 sp = strchr(name_buf, '-'); 5306 if (!sp) 5307 sp = strchrnul(name_buf, '\n'); 5308 *sp = '%'; 5309 *(sp + 1) = '\0'; 5310 5311 fclose(input); 5312 5313 sprintf(path, "cpuidle/state%d/time", state); 5314 5315 if (is_deferred_skip(name_buf)) 5316 continue; 5317 5318 add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, 5319 FORMAT_PERCENT, SYSFS_PERCPU); 5320 } 5321 5322 for (state = 10; state >= 0; --state) { 5323 5324 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", 5325 base_cpu, state); 5326 input = fopen(path, "r"); 5327 if (input == NULL) 5328 continue; 5329 fgets(name_buf, sizeof(name_buf), input); 5330 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 5331 sp = strchr(name_buf, '-'); 5332 if (!sp) 5333 sp = strchrnul(name_buf, '\n'); 5334 *sp = '\0'; 5335 fclose(input); 5336 5337 sprintf(path, "cpuidle/state%d/usage", state); 5338 5339 if (is_deferred_skip(name_buf)) 5340 continue; 5341 5342 add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, 5343 FORMAT_DELTA, SYSFS_PERCPU); 5344 } 5345 5346 } 5347 5348 5349 /* 5350 * parse cpuset with following syntax 5351 * 1,2,4..6,8-10 and set bits in cpu_subset 5352 */ 5353 void parse_cpu_command(char *optarg) 5354 { 5355 unsigned int start, end; 5356 char *next; 5357 5358 if (!strcmp(optarg, "core")) { 5359 if (cpu_subset) 5360 goto error; 5361 show_core_only++; 5362 return; 5363 } 5364 if (!strcmp(optarg, "package")) { 5365 if (cpu_subset) 5366 goto error; 5367 show_pkg_only++; 5368 return; 5369 } 5370 if (show_core_only || show_pkg_only) 5371 goto error; 5372 5373 cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS); 5374 if (cpu_subset == NULL) 5375 err(3, "CPU_ALLOC"); 5376 cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS); 5377 5378 CPU_ZERO_S(cpu_subset_size, cpu_subset); 5379 5380 next = optarg; 5381 5382 while (next && *next) { 5383 5384 if (*next == '-') /* no negative cpu numbers */ 5385 goto error; 5386 5387 start = strtoul(next, &next, 10); 5388 5389 if (start >= CPU_SUBSET_MAXCPUS) 5390 goto error; 5391 CPU_SET_S(start, cpu_subset_size, cpu_subset); 5392 5393 if (*next == '\0') 5394 break; 5395 5396 if (*next == ',') { 5397 next += 1; 5398 continue; 5399 } 5400 5401 if (*next == '-') { 5402 next += 1; /* start range */ 5403 } else if (*next == '.') { 5404 next += 1; 5405 if (*next == '.') 5406 next += 1; /* start range */ 5407 else 5408 goto error; 5409 } 5410 5411 end = strtoul(next, &next, 10); 5412 if (end <= start) 5413 goto error; 5414 5415 while (++start <= end) { 5416 if (start >= CPU_SUBSET_MAXCPUS) 5417 goto error; 5418 CPU_SET_S(start, cpu_subset_size, cpu_subset); 5419 } 5420 5421 if (*next == ',') 5422 next += 1; 5423 else if (*next != '\0') 5424 goto error; 5425 } 5426 5427 return; 5428 5429 error: 5430 fprintf(stderr, "\"--cpu %s\" malformed\n", optarg); 5431 help(); 5432 exit(-1); 5433 } 5434 5435 5436 void cmdline(int argc, char **argv) 5437 { 5438 int opt; 5439 int option_index = 0; 5440 static struct option long_options[] = { 5441 {"add", required_argument, 0, 'a'}, 5442 {"cpu", required_argument, 0, 'c'}, 5443 {"Dump", no_argument, 0, 'D'}, 5444 {"debug", no_argument, 0, 'd'}, /* internal, not documented */ 5445 {"enable", required_argument, 0, 'e'}, 5446 {"interval", required_argument, 0, 'i'}, 5447 {"num_iterations", required_argument, 0, 'n'}, 5448 {"help", no_argument, 0, 'h'}, 5449 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help 5450 {"Joules", no_argument, 0, 'J'}, 5451 {"list", no_argument, 0, 'l'}, 5452 {"out", required_argument, 0, 'o'}, 5453 {"quiet", no_argument, 0, 'q'}, 5454 {"show", required_argument, 0, 's'}, 5455 {"Summary", no_argument, 0, 'S'}, 5456 {"TCC", required_argument, 0, 'T'}, 5457 {"version", no_argument, 0, 'v' }, 5458 {0, 0, 0, 0 } 5459 }; 5460 5461 progname = argv[0]; 5462 5463 while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v", 5464 long_options, &option_index)) != -1) { 5465 switch (opt) { 5466 case 'a': 5467 parse_add_command(optarg); 5468 break; 5469 case 'c': 5470 parse_cpu_command(optarg); 5471 break; 5472 case 'D': 5473 dump_only++; 5474 break; 5475 case 'e': 5476 /* --enable specified counter */ 5477 bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST); 5478 break; 5479 case 'd': 5480 debug++; 5481 ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); 5482 break; 5483 case 'H': 5484 /* 5485 * --hide: do not show those specified 5486 * multiple invocations simply clear more bits in enabled mask 5487 */ 5488 bic_enabled &= ~bic_lookup(optarg, HIDE_LIST); 5489 break; 5490 case 'h': 5491 default: 5492 help(); 5493 exit(1); 5494 case 'i': 5495 { 5496 double interval = strtod(optarg, NULL); 5497 5498 if (interval < 0.001) { 5499 fprintf(outf, "interval %f seconds is too small\n", 5500 interval); 5501 exit(2); 5502 } 5503 5504 interval_tv.tv_sec = interval_ts.tv_sec = interval; 5505 interval_tv.tv_usec = (interval - interval_tv.tv_sec) * 1000000; 5506 interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000; 5507 } 5508 break; 5509 case 'J': 5510 rapl_joules++; 5511 break; 5512 case 'l': 5513 ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); 5514 list_header_only++; 5515 quiet++; 5516 break; 5517 case 'o': 5518 outf = fopen_or_die(optarg, "w"); 5519 break; 5520 case 'q': 5521 quiet = 1; 5522 break; 5523 case 'n': 5524 num_iterations = strtod(optarg, NULL); 5525 5526 if (num_iterations <= 0) { 5527 fprintf(outf, "iterations %d should be positive number\n", 5528 num_iterations); 5529 exit(2); 5530 } 5531 break; 5532 case 's': 5533 /* 5534 * --show: show only those specified 5535 * The 1st invocation will clear and replace the enabled mask 5536 * subsequent invocations can add to it. 5537 */ 5538 if (shown == 0) 5539 bic_enabled = bic_lookup(optarg, SHOW_LIST); 5540 else 5541 bic_enabled |= bic_lookup(optarg, SHOW_LIST); 5542 shown = 1; 5543 break; 5544 case 'S': 5545 summary_only++; 5546 break; 5547 case 'T': 5548 tcc_activation_temp_override = atoi(optarg); 5549 break; 5550 case 'v': 5551 print_version(); 5552 exit(0); 5553 break; 5554 } 5555 } 5556 } 5557 5558 int main(int argc, char **argv) 5559 { 5560 outf = stderr; 5561 cmdline(argc, argv); 5562 5563 if (!quiet) 5564 print_version(); 5565 5566 probe_sysfs(); 5567 5568 turbostat_init(); 5569 5570 /* dump counters and exit */ 5571 if (dump_only) 5572 return get_and_dump_counters(); 5573 5574 /* list header and exit */ 5575 if (list_header_only) { 5576 print_header(","); 5577 flush_output_stdout(); 5578 return 0; 5579 } 5580 5581 /* 5582 * if any params left, it must be a command to fork 5583 */ 5584 if (argc - optind) 5585 return fork_it(argv + optind); 5586 else 5587 turbostat_loop(); 5588 5589 return 0; 5590 } 5591