/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/static_call.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;
static struct pmu pmu;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
	.pmu = &pmu,
};

DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);

/*
 * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
 * from just a typename, as opposed to an actual function.
 */
DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq,  *x86_pmu.handle_irq);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all,  *x86_pmu.enable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable,      *x86_pmu.enable);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable,     *x86_pmu.disable);

DEFINE_STATIC_CALL_NULL(x86_pmu_assign, *x86_pmu.assign);

DEFINE_STATIC_CALL_NULL(x86_pmu_add,  *x86_pmu.add);
DEFINE_STATIC_CALL_NULL(x86_pmu_del,  *x86_pmu.del);
DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);

DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events,       *x86_pmu.schedule_events);
DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);

DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling,  *x86_pmu.start_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling,   *x86_pmu.stop_scheduling);

DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task,    *x86_pmu.sched_task);
DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);

DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
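
/*
 * Call sites go through static_call(), e.g.:
 *
 *	static_call(x86_pmu_enable_all)(added);
 *
 * They are pointed at the vendor implementation by
 * x86_pmu_static_call_update() below, once boot code has filled in
 * the x86_pmu structure.
 */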

/*
 * This one is magic, it will get called even when PMU init fails (because
 * there is no PMU), in which case it should simply return NULL.
 */
DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	u64 delta;

	if (unlikely(!hwc->event_base))
		return 0;

	if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event)
		return x86_pmu.update_topdown_event(event);

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
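
/*
 * Worked example of the shift trick above, assuming 48-bit counters
 * (cntval_bits == 48, so shift == 16): if the raw count wraps from
 * 0xffffffffffff to 0x5, then
 *
 *	delta = (0x5 << 16) - (0xffffffffffff << 16);	// == 0x60000
 *	delta >>= 16;					// == 6
 *
 * i.e. the six events counted across the wrap, regardless of whether
 * the hardware sign-extends above bit 47.
 */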

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!extra_regs)
		return 0;

	for (er = extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra msrs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static inline int get_possible_num_counters(void)
{
	int i, num_counters = x86_pmu.num_counters;

	if (!is_hybrid())
		return num_counters;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
		num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);

	return num_counters;
}

static bool reserve_pmc_hardware(void)
{
	int i, num_counters = get_possible_num_counters();

	for (i = 0; i < num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i, num_counters = get_possible_num_counters();

	for (i = 0; i < num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
{
	u64 val, val_fail = -1, val_new = ~0;
	int i, reg, reg_fail = -1, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < num_counters_fixed; i++) {
			if (fixed_counter_disabled(i, pmu))
				continue;
			if (val & (0x03ULL << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail.  The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */
	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrl_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrl_safe(reg, val);
	ret |= rdmsrl_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
		       reg_fail, val_fail);
	}

	return true;

msr_fail:
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		pr_cont("PMU not available due to virtualization, using software events only.\n");
	} else {
		pr_cont("Broken PMU hardware detected, using software events only.\n");
		pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
		       reg, val_new);
	}

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;
	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);

	val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result];
	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_reserve_hardware(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware()) {
				err = -EBUSY;
			} else {
				reserve_ds_buffers();
				reserve_lbr_buffers();
			}
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		release_lbr_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
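
/*
 * Usage note: the reserve/release pair above relies on the
 * atomic_inc_not_zero() + mutex idiom, keeping the common
 * already-reserved path lock-free. Callers pair the two calls the way
 * __x86_pmu_event_init() and hw_perf_event_destroy() do:
 *
 *	err = x86_reserve_hardware();
 *	if (err)
 *		return err;
 *	...
 *	x86_release_hardware();
 */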

/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	/*
	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
	 * LBR and BTS are still mutually exclusive.
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		goto out;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

out:
	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	atomic_dec(&active_events);

	/*
	 * See the comment in x86_add_exclusive().
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return;

	atomic_dec(&x86_pmu.lbr_exclusive[what]);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == event->pmu->type)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

/*
 * Check that branch_sample_type is compatible with the settings needed
 * for precise_ip > 1, which implies using the LBR to capture ALL taken
 * branches at the priv levels of the measurement.
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

int x86_pmu_max_precise(void)
{
	int precise = 0;

	/* Support for constant skid */
	if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
		precise++;

		/* Support for IP fixup */
		if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
			precise++;

		if (x86_pmu.pebs_prec_dist)
			precise++;
	}
	return precise;
}
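
/*
 * The resulting maximum maps onto the perf_event_attr.precise_ip
 * levels described in the uapi headers:
 *
 *	0 - SAMPLE_IP can have arbitrary skid
 *	1 - SAMPLE_IP must have constant skid (PEBS)
 *	2 - SAMPLE_IP requested to have 0 skid (LBR/PEBS format >= 2 fixup)
 *	3 - SAMPLE_IP must have 0 skid (precise distribution)
 */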

int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = x86_pmu_max_precise();

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;

		/* There's no sense in having PEBS for non-sampling events: */
		if (!is_sampling_event(event))
			return -EINVAL;
	}
	/*
	 * Check that the PEBS LBR correction does not conflict with
	 * whatever the user is asking for in attr->branch_sample_type.
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * The user did not specify a branch_sample_type.
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
			*br_type = PERF_SAMPLE_BRANCH_ANY;

			if (!event->attr.exclude_user)
				*br_type |= PERF_SAMPLE_BRANCH_USER;

			if (!event->attr.exclude_kernel)
				*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
		}
	}

	if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
		event->attach_state |= PERF_ATTACH_TASK_DATA;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == event->pmu->type)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	if (event->attr.sample_period && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, event->attr.sample_period) >
				event->attr.sample_period)
			return -EINVAL;
	}

	/* sample_regs_user never supports XMM registers */
	if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
		return -EINVAL;
	/*
	 * Besides the general purpose registers, XMM registers may
	 * be collected in PEBS on some platforms, e.g. Icelake.
	 */
	if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
		if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
			return -EINVAL;

		if (!event->attr.precise_ip)
			return -EINVAL;
	}

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = x86_reserve_hardware();
	if (err)
		return err;

	atomic_inc(&active_events);
	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
		if (is_counter_pair(hwc))
			wrmsrl(x86_pmu_config_addr(idx + 1), 0);
	}
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	return static_call(x86_pmu_guest_get_msrs)(nr);
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

/*
 * A PMI may land after enabled = 0; it can hit either before or after
 * disable_all().
 *
 * If the PMI hits before disable_all(), the PMU is disabled in the NMI
 * handler. It will not be re-enabled in the NMI handler again, because
 * enabled == 0. After handling the NMI, disable_all() is called, which
 * does not change the state either. If the PMI hits after disable_all(),
 * the PMU is already disabled before entering the NMI handler, and the
 * NMI handler does not change the state either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	static_call(x86_pmu_disable_all)();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static inline int is_x86_event(struct perf_event *event)
{
	int i;

	if (!is_hybrid())
		return event->pmu == &pmu;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
		if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu)
			return true;
	}

	return false;
}

struct pmu *x86_get_pmu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	/*
	 * All CPUs of the hybrid type have gone offline;
	 * x86_get_pmu() should not be invoked.
	 */
	if (WARN_ON_ONCE(!cpuc->pmu))
		return &pmu;

	return cpuc->pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	u64	used;
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define	SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events	= num;
	sched->max_weight	= wmax;
	sched->max_gp		= gpmax;
	sched->constraints	= constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event	= idx;		/* start with min weight */
	sched->state.weight	= wmin;
	sched->state.unassigned	= num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* this assignment didn't work out */
	/* XXX broken vs EVENT_PAIR */
	sched->state.used &= ~BIT_ULL(sched->state.counter);

	/* try the next one */
	sched->state.counter++;

	return true;
}
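
/*
 * A small worked example of the scheduler, under assumed constraints:
 * event A may use counters {0,1} (weight 2) and event B only counter
 * {0} (weight 1). Iteration starts at the lowest weight, so B claims
 * counter 0 first and A then takes counter 1. Scheduling A first could
 * grab counter 0 and strand B; the save/restore machinery above exists
 * so that such an overlapping choice can be rolled back and retried.
 */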

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			u64 mask = BIT_ULL(idx);

			if (sched->state.used & mask)
				continue;

			sched->state.used |= mask;
			goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		u64 mask = BIT_ULL(idx);

		if (c->flags & PERF_X86_EVENT_PAIR)
			mask |= mask << 1;

		if (sched->state.used & mask)
			continue;

		if (sched->state.nr_gp++ >= sched->max_gp)
			return false;

		sched->state.used |= mask;
		goto done;
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
		       int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);
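
/*
 * Usage note: callers (x86_schedule_events() below, or other users of
 * the export) pass one constraint per collected event; on return,
 * assign[i] holds the chosen counter index for event i and the return
 * value is the number of events left unassigned, i.e. 0 on success.
 */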

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	int num_counters = hybrid(cpuc->pmu, num_counters);
	struct event_constraint *c;
	struct perf_event *e;
	int n0, i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;
	u64 used_mask = 0;

	/*
	 * Compute the number of events already present; see x86_pmu_add(),
	 * validate_group() and x86_pmu_commit_txn(). For the former two
	 * cpuc->n_events hasn't been updated yet, while for the latter
	 * cpuc->n_txn contains the number of events added in the current
	 * transaction.
	 */
	n0 = cpuc->n_events;
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		n0 -= cpuc->n_txn;

	static_call_cond(x86_pmu_start_scheduling)(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = cpuc->event_constraint[i];

		/*
		 * Previously scheduled events should have a cached constraint,
		 * while new events should not have one.
		 */
		WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));

		/*
		 * Request constraints for new events; or for those events that
		 * have a dynamic constraint -- for those the constraint can
		 * change due to external factors (sibling state, allow_tfa).
		 */
		if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
			c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
			cpuc->event_constraint[i] = c;
		}

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		u64 mask;

		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		mask = BIT_ULL(hwc->idx);
		if (is_counter_pair(hwc))
			mask |= mask << 1;

		/* not already used */
		if (used_mask & mask)
			break;

		used_mask |= mask;

		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = num_counters;

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of sibling thread by
		 * ensuring at most half the counters cannot be in exclusive
		 * mode. There are no designated counters for the limits; any
		 * N/2 counters can be used. This helps with events with
		 * specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		/*
		 * Reduce the amount of available counters to allow fitting
		 * the extra Merge events needed by large increment events.
		 */
		if (x86_pmu.flags & PMU_FL_PAIR) {
			gpmax = num_counters - cpuc->n_pair;
			WARN_ON(gpmax <= 0);
		}

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled.
	 *
	 * We invoke the lower level commit callback to lock the resource.
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL).
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++)
			static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
	} else {
		for (i = n0; i < n; i++) {
			e = cpuc->event_list[i];

			/*
			 * release events that failed scheduling
			 */
			static_call_cond(x86_pmu_put_event_constraints)(cpuc, e);

			cpuc->event_constraint[i] = NULL;
		}
	}

	static_call_cond(x86_pmu_stop_scheduling)(cpuc);

	return unsched ? -EINVAL : 0;
}

static int add_nr_metric_event(struct cpu_hw_events *cpuc,
			       struct perf_event *event)
{
	if (is_metric_event(event)) {
		if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
			return -EINVAL;
		cpuc->n_metric++;
		cpuc->n_txn_metric++;
	}

	return 0;
}

static void del_nr_metric_event(struct cpu_hw_events *cpuc,
				struct perf_event *event)
{
	if (is_metric_event(event))
		cpuc->n_metric--;
}

static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
			 int max_count, int n)
{
	union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);

	if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
		return -EINVAL;

	if (n >= max_count + cpuc->n_metric)
		return -EINVAL;

	cpuc->event_list[n] = event;
	if (is_counter_pair(&event->hw)) {
		cpuc->n_pair++;
		cpuc->n_txn_pair++;
	}

	return 0;
}

/*
 * dogrp: true if we must also collect the sibling events of a group.
 * Returns the total number of events, or an error code.
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	int num_counters = hybrid(cpuc->pmu, num_counters);
	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
	struct perf_event *event;
	int n, max_count;

	max_count = num_counters + num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;
	if (!cpuc->n_events)
		cpuc->pebs_output = 0;

	if (!cpuc->is_fake && leader->attr.precise_ip) {
		/*
		 * For PEBS->PT, if !aux_event, the group leader (PT) went
		 * away, the group was broken down and this singleton event
		 * can't schedule any more.
		 */
		if (is_pebs_pt(leader) && !leader->aux_event)
			return -EINVAL;

		/*
		 * pebs_output: 0: no PEBS so far, 1: PT, 2: DS
		 */
		if (cpuc->pebs_output &&
		    cpuc->pebs_output != is_pebs_pt(leader) + 1)
			return -EINVAL;

		cpuc->pebs_output = is_pebs_pt(leader) + 1;
	}

	if (is_x86_event(leader)) {
		if (collect_event(cpuc, leader, max_count, n))
			return -EINVAL;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (collect_event(cpuc, event, max_count, n))
			return -EINVAL;

		n++;
	}
	return n;
}
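
/*
 * Overview of the counter index space used below: generic counters
 * occupy indices 0..num_counters-1, the fixed counters start at
 * INTEL_PMC_IDX_FIXED, and pseudo indices (INTEL_PMC_IDX_FIXED_BTS,
 * INTEL_PMC_IDX_FIXED_VLBR, the metric range) lie above that;
 * x86_assign_hw_event() translates each of them into the matching
 * config/event MSR bases.
 */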

static inline void x86_assign_hw_event(struct perf_event *event,
				       struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	static_call_cond(x86_pmu_assign)(event, idx);

	switch (hwc->idx) {
	case INTEL_PMC_IDX_FIXED_BTS:
	case INTEL_PMC_IDX_FIXED_VLBR:
		hwc->config_base = 0;
		hwc->event_base	= 0;
		break;

	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
		/* All the metric events are mapped onto the fixed counter 3. */
		idx = INTEL_PMC_IDX_FIXED_SLOTS;
		fallthrough;
	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
				(idx - INTEL_PMC_IDX_FIXED);
		hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
					INTEL_PMC_FIXED_RDPMC_BASE;
		break;

	default:
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
		break;
	}
}

/**
 * x86_perf_rdpmc_index - Return PMC counter used for event
 * @event: the perf_event to which the PMC counter was assigned
 *
 * The counter assigned to this performance event may change if interrupts
 * are enabled. This counter should thus never be used while interrupts are
 * enabled. Before this function is used to obtain the assigned counter the
 * event should be checked for validity using, for example,
 * perf_event_read_local(), within the same interrupt disabled section in
 * which this counter is planned to be used.
 *
 * Return: The index of the performance monitoring counter assigned to
 * @event.
 */
int x86_perf_rdpmc_index(struct perf_event *event)
{
	lockdep_assert_irqs_disabled();

	return event->hw.event_base_rdpmc;
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
	       hwc->last_cpu == smp_processor_id() &&
	       hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	static_call(x86_pmu_enable_all)(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (unlikely(!hwc->event_base))
		return 0;

	if (unlikely(is_topdown_count(event)) &&
	    x86_pmu.set_topdown_event_period)
		return x86_pmu.set_topdown_event_period(event);

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to compute future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Sign extend the Merge event counter's upper 16 bits since
	 * we currently declare a 48-bit counter width
	 */
	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);

	/*
	 * Due to an erratum on certain CPUs we need a second write to
	 * be sure the register is updated properly.
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
		       (u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}
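
/*
 * Example of the (-left) programming convention above, assuming 48-bit
 * counters and a sample period of 100000: the counter is programmed
 * with (u64)-100000 & cntval_mask and counts upward, so it overflows
 * (and raises the PMI) after exactly 100000 increments.
 */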

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If a group events scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 *
	 * If commit fails, we'll call ->del() on all events
	 * for which ->add() was called.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto done_collect;

	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * Copy the new assignment now that we know it is possible;
	 * it will be used by hw_perf_enable().
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	/*
	 * Commit the collect_events() state. See x86_pmu_del() and
	 * x86_pmu_*_txn().
	 */
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	/*
	 * This is before x86_pmu_enable() will call x86_pmu_start(),
	 * so we enable LBRs before an event needs them etc.
	 */
	static_call_cond(x86_pmu_add)(event);

	ret = 0;
out:
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	static_call(x86_pmu_enable)(event);
	perf_event_update_userpage(event);
}
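
/*
 * Driver-model note: x86_pmu_add()/x86_pmu_del() implement the struct
 * pmu ->add()/->del() callbacks (attaching and detaching an event),
 * while x86_pmu_start()/x86_pmu_stop() implement ->start()/->stop()
 * (gating the counting of an already-added event). The perf core
 * invokes ->add()/->del() with the PMU disabled, i.e. bracketed by
 * pmu_disable()/pmu_enable().
 */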

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs, debugctl;
	int cpu = smp_processor_id();
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int num_counters = hybrid(cpuc->pmu, num_counters);
	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
	struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
	unsigned long flags;
	int idx;

	if (!num_counters)
		return;

	local_irq_save(flags);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		if (pebs_constraints) {
			rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
			pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
		}
		if (x86_pmu.lbr_nr) {
			rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
			pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
		}
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < num_counters; idx++) {
		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
		rdmsrl(x86_pmu_event_addr(idx), pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < num_counters_fixed; idx++) {
		if (fixed_counter_disabled(idx, cpuc->pmu))
			continue;
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (test_bit(hwc->idx, cpuc->active_mask)) {
		static_call(x86_pmu_disable)(event);
		__clear_bit(hwc->idx, cpuc->active_mask);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);
	int i;

	/*
	 * If we're called during a txn, we only need to undo x86_pmu.add.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 *
	 * XXX assumes any ->del() called during a TXN will only be on
	 * an event added during that same TXN.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto do_del;

	__set_bit(event->hw.idx, cpuc->dirty);

	/*
	 * Not a TXN, therefore cleanup properly.
	 */
	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i])
			break;
	}

	if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
		return;

	/* If we have a newly added event; make sure to decrease n_added. */
	if (i >= cpuc->n_events - cpuc->n_added)
		--cpuc->n_added;

	static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);

	/* Delete the array entry. */
	while (++i < cpuc->n_events) {
		cpuc->event_list[i-1] = cpuc->event_list[i];
		cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
	}
	cpuc->event_constraint[i-1] = NULL;
	--cpuc->n_events;
	if (intel_cap.perf_metrics)
		del_nr_metric_event(cpuc, event);

	perf_event_update_userpage(event);

do_del:

	/*
	 * This is after x86_pmu_stop(); so we disable LBRs after any
	 * event that could need them, etc.
	 */
	static_call_cond(x86_pmu_del)(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This generic handler doesn't seem to have any issues where the
	 * unmasking occurs so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 start_clock;
	u64 finish_clock;
	int ret;

	/*
	 * All PMUs/events that share this PMI handler should make sure to
	 * increment active_events for their events.
	 */
	if (!atomic_read(&active_events))
		return NMI_DONE;

	start_clock = sched_clock();
	ret = static_call(x86_pmu_handle_irq)(regs);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);

	return ret;
}
NOKPROBE_SYMBOL(perf_event_nmi_handler);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int x86_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i;

	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
		cpuc->kfree_on_online[i] = NULL;
	if (x86_pmu.cpu_prepare)
		return x86_pmu.cpu_prepare(cpu);
	return 0;
}

static int x86_pmu_dead_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_dead)
		x86_pmu.cpu_dead(cpu);
	return 0;
}

static int x86_pmu_online_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i;

	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
		kfree(cpuc->kfree_on_online[i]);
		cpuc->kfree_on_online[i] = NULL;
	}
	return 0;
}

static int x86_pmu_starting_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_starting)
		x86_pmu.cpu_starting(cpu);
	return 0;
}

static int x86_pmu_dying_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_dying)
		x86_pmu.cpu_dying(cpu);
	return 0;
}

static void __init pmu_check_apic(void)
{
	if (boot_cpu_has(X86_FEATURE_APIC))
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");

	/*
	 * If we have a PMU initialized but no APIC
	 * interrupts, we cannot sample hardware
	 * events (user-space has to fall back and
	 * sample via a hrtimer based software event):
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

static struct attribute_group x86_pmu_format_group __ro_after_init = {
	.name = "format",
	.attrs = NULL,
};

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);
	u64 config = 0;

	if (pmu_attr->id < x86_pmu.max_events)
		config = x86_pmu.event_map(pmu_attr->id);

	/* string trumps id */
	if (pmu_attr->event_str)
		return sprintf(page, "%s", pmu_attr->event_str);

	return x86_pmu.events_sysfs_show(page, config);
}
EXPORT_SYMBOL_GPL(events_sysfs_show);

ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct perf_pmu_events_ht_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_ht_attr, attr);

	/*
	 * Report conditional events depending on Hyper-Threading.
	 *
	 * This is overly conservative as usually the HT special
	 * handling is not needed if the other CPU thread is idle.
	 *
	 * Note this does not (and cannot) handle the case when thread
	 * siblings are invisible, for example with virtualization
	 * if they are owned by some other guest.  The user tool
	 * has to re-read when a thread sibling gets onlined later.
	 */
	return sprintf(page, "%s",
			topology_max_smt_threads() > 1 ?
			pmu_attr->event_str_ht :
			pmu_attr->event_str_noht);
}

ssize_t events_hybrid_sysfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *page)
{
	struct perf_pmu_events_hybrid_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_hybrid_attr, attr);
	struct x86_hybrid_pmu *pmu;
	const char *str, *next_str;
	int i;

	if (hweight64(pmu_attr->pmu_type) == 1)
		return sprintf(page, "%s", pmu_attr->event_str);

	/*
	 * Hybrid PMUs may support the same event name, but with different
	 * event encoding, e.g., the mem-loads event on an Atom PMU has
	 * different event encoding from a Core PMU.
	 *
	 * The event_str includes all event encodings. Each event encoding
	 * is divided by ";". The order of the event encodings must follow
	 * the order of the hybrid PMU index.
	 */
	pmu = container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);

	str = pmu_attr->event_str;
	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
		if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type))
			continue;
		if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) {
			next_str = strchr(str, ';');
			if (next_str)
				return snprintf(page, next_str - str + 1, "%s", str);
			else
				return sprintf(page, "%s", str);
		}
		str = strchr(str, ';');
		str++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(events_hybrid_sysfs_show);
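
/*
 * Illustration with hypothetical encodings: on a two-PMU hybrid part an
 * event_str could be "event=0xd0,umask=0x81;event=0xd0,umask=0x05",
 * where the text before the ';' belongs to hybrid PMU index 0 and the
 * text after it to index 1, which is exactly how the loop above walks
 * the string.
 */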

EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
EVENT_ATTR(instructions,		INSTRUCTIONS		);
EVENT_ATTR(cache-references,		CACHE_REFERENCES	);
EVENT_ATTR(cache-misses,		CACHE_MISSES		);
EVENT_ATTR(branch-instructions,		BRANCH_INSTRUCTIONS	);
EVENT_ATTR(branch-misses,		BRANCH_MISSES		);
EVENT_ATTR(bus-cycles,			BUS_CYCLES		);
EVENT_ATTR(stalled-cycles-frontend,	STALLED_CYCLES_FRONTEND	);
EVENT_ATTR(stalled-cycles-backend,	STALLED_CYCLES_BACKEND	);
EVENT_ATTR(ref-cycles,			REF_CPU_CYCLES		);

static struct attribute *empty_attrs;

static struct attribute *events_attr[] = {
	EVENT_PTR(CPU_CYCLES),
	EVENT_PTR(INSTRUCTIONS),
	EVENT_PTR(CACHE_REFERENCES),
	EVENT_PTR(CACHE_MISSES),
	EVENT_PTR(BRANCH_INSTRUCTIONS),
	EVENT_PTR(BRANCH_MISSES),
	EVENT_PTR(BUS_CYCLES),
	EVENT_PTR(STALLED_CYCLES_FRONTEND),
	EVENT_PTR(STALLED_CYCLES_BACKEND),
	EVENT_PTR(REF_CPU_CYCLES),
	NULL,
};

/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * out of events_attr attributes.
 */
static umode_t
is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct perf_pmu_events_attr *pmu_attr;

	if (idx >= x86_pmu.max_events)
		return 0;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
	/* str trumps id */
	return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
}

static struct attribute_group x86_pmu_events_group __ro_after_init = {
	.name = "events",
	.attrs = events_attr,
	.is_visible = is_visible,
};

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{
	u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
	bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
	bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
	bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
	bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
	ssize_t ret;

	/*
	 * We have a whole page to spend and only a little data to write,
	 * so we can safely use sprintf.
	 */
	ret = sprintf(page, "event=0x%02llx", event);

	if (umask)
		ret += sprintf(page + ret, ",umask=0x%02llx", umask);

	if (edge)
		ret += sprintf(page + ret, ",edge");

	if (pc)
		ret += sprintf(page + ret, ",pc");

	if (any)
		ret += sprintf(page + ret, ",any");

	if (inv)
		ret += sprintf(page + ret, ",inv");

	if (cmask)
		ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);

	ret += sprintf(page + ret, "\n");

	return ret;
}
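
/*
 * Example output (for an assumed config with event 0x3c, umask 0x01 and
 * cmask 2):
 *
 *	event=0x3c,umask=0x01,cmask=0x02
 *
 * i.e. the attribute syntax the perf tool parses back from sysfs.
 */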
/*
 * The generic code is not hybrid friendly. The hybrid_pmu->pmu
 * of the first registered PMU is unconditionally assigned to
 * each possible cpuctx->ctx.pmu.
 * Point each cpuctx->ctx.pmu at the correct hybrid PMU instead.
 */
void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu)
{
	struct perf_cpu_context *cpuctx;

	if (!pmu->pmu_cpu_context)
		return;

	cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
	cpuctx->ctx.pmu = pmu;
}
static int __init init_hw_perf_events(void)
{
	struct x86_pmu_quirk *quirk;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	case X86_VENDOR_HYGON:
		err = amd_pmu_init();
		x86_pmu.name = "HYGON";
		break;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		err = zhaoxin_pmu_init();
		break;
	default:
		err = -ENOTSUPP;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return 0;
	}

	pmu_check_apic();

	/* sanity check that the hardware exists or is emulated */
	if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed))
		return 0;

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */

	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
		quirk->func();

	if (!x86_pmu.intel_ctrl)
		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	perf_events_lapic_init();
	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
				   0, x86_pmu.num_counters, 0, 0);

	x86_pmu_format_group.attrs = x86_pmu.format_attrs;

	if (!x86_pmu.events_sysfs_show)
		x86_pmu_events_group.attrs = &empty_attrs;

	pmu.attr_update = x86_pmu.attr_update;

	if (!is_hybrid()) {
		x86_pmu_show_pmu_cap(x86_pmu.num_counters,
				     x86_pmu.num_counters_fixed,
				     x86_pmu.intel_ctrl);
	}

	if (!x86_pmu.read)
		x86_pmu.read = _x86_pmu_read;

	if (!x86_pmu.guest_get_msrs)
		x86_pmu.guest_get_msrs = (void *)&__static_call_return0;

	x86_pmu_static_call_update();

	/*
	 * Install callbacks. Core will call them for each online
	 * cpu.
	 */
	err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
				x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
				"perf/x86:starting", x86_pmu_starting_cpu,
				x86_pmu_dying_cpu);
	if (err)
		goto out;

	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
				x86_pmu_online_cpu, NULL);
	if (err)
		goto out1;

	if (!is_hybrid()) {
		err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
		if (err)
			goto out2;
	} else {
		u8 cpu_type = get_this_hybrid_cpu_type();
		struct x86_hybrid_pmu *hybrid_pmu;
		int i, j;

		if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
			cpu_type = x86_pmu.get_hybrid_cpu_type();

		for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
			hybrid_pmu = &x86_pmu.hybrid_pmu[i];

			hybrid_pmu->pmu = pmu;
			hybrid_pmu->pmu.type = -1;
			hybrid_pmu->pmu.attr_update = x86_pmu.attr_update;
			hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
			hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE;

			err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name,
						(hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
			if (err)
				break;

			if (cpu_type == hybrid_pmu->cpu_type)
				x86_pmu_update_cpu_context(&hybrid_pmu->pmu, raw_smp_processor_id());
		}

		if (i < x86_pmu.num_hybrid_pmus) {
			for (j = 0; j < i; j++)
				perf_pmu_unregister(&x86_pmu.hybrid_pmu[j].pmu);
			pr_warn("Failed to register hybrid PMUs\n");
			kfree(x86_pmu.hybrid_pmu);
			x86_pmu.hybrid_pmu = NULL;
			x86_pmu.num_hybrid_pmus = 0;
			goto out2;
		}
	}

	return 0;

out2:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
out:
	cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
	return err;
}
early_initcall(init_hw_perf_events);

static void x86_pmu_read(struct perf_event *event)
{
	static_call(x86_pmu_read)(event);
}
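/*
 * A note on the hotplug wiring in init_hw_perf_events() above: each
 * cpuhp_setup_state() call also runs its startup callback for every CPU
 * that is already online, so by the time a state is registered the
 * per-CPU PMU structures are consistent everywhere. The error unwinding
 * (out2/out1/out) removes the states in reverse order, which in turn
 * invokes the matching teardown callbacks (x86_pmu_dying_cpu(),
 * x86_pmu_dead_cpu(), ...) on the online CPUs.
 */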
/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test; it will be performed at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuc->txn_flags);		/* txn already in flight */

	cpuc->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	__this_cpu_write(cpu_hw_events.n_txn, 0);
	__this_cpu_write(cpu_hw_events.n_txn_pair, 0);
	__this_cpu_write(cpu_hw_events.n_txn_metric, 0);
}

/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */

	txn_flags = cpuc->txn_flags;
	cpuc->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	/*
	 * Truncate collected array by the number of events added in this
	 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
	 */
	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
	__this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
	__this_cpu_sub(cpu_hw_events.n_metric, __this_cpu_read(cpu_hw_events.n_txn_metric));
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Returns 0 on success.
 *
 * Does not cancel the transaction on failure; expects the caller to do this.
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int assign[X86_PMC_IDX_MAX];
	int n, ret;

	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */

	if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuc->txn_flags = 0;
		return 0;
	}

	n = cpuc->n_events;

	if (!x86_pmu_initialized())
		return -EAGAIN;

	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
	if (ret)
		return ret;

	/*
	 * Copy the new assignment, now that we know it is possible;
	 * it will be used by hw_perf_enable().
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}
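/*
 * Sketch of how the generic core drives these transaction hooks when
 * scheduling an event group (simplified from group_sched_in() in
 * kernel/events/core.c; error handling elided):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	event_sched_in(group_leader);	// -> pmu->add(), no schedulability test
 *	for_each_sibling_event(event, group_leader)
 *		event_sched_in(event);	// -> pmu->add()
 *	if (!pmu->commit_txn(pmu))	// one schedulability test for the group
 *		return 0;
 *	...				// on failure: undo, then pmu->cancel_txn(pmu)
 */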
/*
 * A fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra regs may conflict without the kernel being
 * able to catch this when the last event gets added to
 * the group.
 */
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
	intel_cpuc_finish(cpuc);
	kfree(cpuc);
}

static struct cpu_hw_events *allocate_fake_cpuc(struct pmu *event_pmu)
{
	struct cpu_hw_events *cpuc;
	int cpu;

	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
	if (!cpuc)
		return ERR_PTR(-ENOMEM);
	cpuc->is_fake = 1;

	if (is_hybrid()) {
		struct x86_hybrid_pmu *h_pmu;

		h_pmu = hybrid_pmu(event_pmu);
		if (cpumask_empty(&h_pmu->supported_cpus))
			goto error;
		cpu = cpumask_first(&h_pmu->supported_cpus);
	} else
		cpu = raw_smp_processor_id();
	cpuc->pmu = event_pmu;

	if (intel_cpuc_prepare(cpuc, cpu))
		goto error;

	return cpuc;
error:
	free_fake_cpuc(cpuc);
	return ERR_PTR(-ENOMEM);
}

/*
 * Validate that we can schedule this event.
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = allocate_fake_cpuc(event->pmu);
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);

	c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);

	if (!c || !c->weight)
		ret = -EINVAL;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	free_fake_cpuc(fake_cpuc);

	return ret;
}
/*
 * Validate a single event group.
 *
 * Validation includes:
 *	- events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * Validation ensures the group can be loaded onto the
 * PMU if it were the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret = -EINVAL, n;

	/*
	 * Reject events from different hybrid PMUs.
	 */
	if (is_hybrid()) {
		struct perf_event *sibling;
		struct pmu *pmu = NULL;

		if (is_x86_event(leader))
			pmu = leader->pmu;

		for_each_sibling_event(sibling, leader) {
			if (!is_x86_event(sibling))
				continue;
			if (!pmu)
				pmu = sibling->pmu;
			else if (pmu != sibling->pmu)
				return ret;
		}
	}

	fake_cpuc = allocate_fake_cpuc(event->pmu);
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);
	/*
	 * The event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling.
	 */
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = 0;
	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out:
	free_fake_cpuc(fake_cpuc);
	return ret;
}

static int x86_pmu_event_init(struct perf_event *event)
{
	struct x86_hybrid_pmu *pmu = NULL;
	int err;

	if ((event->attr.type != event->pmu->type) &&
	    (event->attr.type != PERF_TYPE_HARDWARE) &&
	    (event->attr.type != PERF_TYPE_HW_CACHE))
		return -ENOENT;

	if (is_hybrid() && (event->cpu != -1)) {
		pmu = hybrid_pmu(event->pmu);
		if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus))
			return -ENOENT;
	}

	err = __x86_pmu_event_init(event);
	if (!err) {
		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		event->destroy = NULL;
	}

	if (READ_ONCE(x86_pmu.attr_rdpmc) &&
	    !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;

	return err;
}
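/*
 * For context: the generic core calls event_init() while probing for a
 * PMU to own a new event. Returning -ENOENT (as above for foreign event
 * types, or for a CPU outside a hybrid PMU's supported mask) makes the
 * core try the next registered PMU rather than failing the syscall;
 * any other error code is final.
 */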
void perf_clear_dirty_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int i;

	/* Don't need to clear the assigned counters. */
	for (i = 0; i < cpuc->n_events; i++)
		__clear_bit(cpuc->assign[i], cpuc->dirty);

	if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
		return;

	for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
		if (i >= INTEL_PMC_IDX_FIXED) {
			/* Metrics and fake events don't have corresponding HW counters. */
			if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
				continue;

			wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
		} else {
			wrmsrl(x86_pmu_event_addr(i), 0);
		}
	}

	bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
}

static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return;

	/*
	 * This function relies on not being called concurrently in two
	 * tasks in the same mm. Otherwise one task could observe
	 * perf_rdpmc_allowed > 1 and return all the way back to
	 * userspace with CR4.PCE clear while another task is still
	 * doing on_each_cpu_mask() to propagate CR4.PCE.
	 *
	 * For now, this can't happen because all callers hold mmap_lock
	 * for write. If this changes, we'll need a different solution.
	 */
	mmap_assert_write_locked(mm);

	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
}

static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return;

	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
}

static int x86_pmu_event_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return 0;

	if (is_metric_idx(hwc->idx))
		return INTEL_PMC_FIXED_RDPMC_METRICS + 1;
	else
		return hwc->event_base_rdpmc + 1;
}
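/*
 * The index returned by x86_pmu_event_idx() above is published to
 * userspace via perf_event_mmap_page::index (see
 * arch_perf_update_userpage() below). A self-monitoring task can then
 * read the counter without a syscall; a minimal sketch of the seqlock
 * protocol documented in include/uapi/linux/perf_event.h:
 *
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (pc->cap_user_rdpmc && idx)
 *			count += rdpmc(idx - 1); // RDPMC takes the 0-based index
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * This is why the +1 above: index 0 is reserved to mean "rdpmc not
 * usable for this event".
 */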
static ssize_t get_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      char *buf)
{
	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}

static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val > 2)
		return -EINVAL;

	if (x86_pmu.attr_rdpmc_broken)
		return -ENOTSUPP;

	if (val != x86_pmu.attr_rdpmc) {
		/*
		 * Changing into or out of never available or always available,
		 * aka perf-event-bypassing mode. This path is extremely slow,
		 * but only root can trigger it, so it's okay.
		 */
		if (val == 0)
			static_branch_inc(&rdpmc_never_available_key);
		else if (x86_pmu.attr_rdpmc == 0)
			static_branch_dec(&rdpmc_never_available_key);

		if (val == 2)
			static_branch_inc(&rdpmc_always_available_key);
		else if (x86_pmu.attr_rdpmc == 2)
			static_branch_dec(&rdpmc_always_available_key);

		on_each_cpu(cr4_update_pce, NULL, 1);
		x86_pmu.attr_rdpmc = val;
	}

	return count;
}

static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);

static struct attribute *x86_pmu_attrs[] = {
	&dev_attr_rdpmc.attr,
	NULL,
};

static struct attribute_group x86_pmu_attr_group __ro_after_init = {
	.attrs = x86_pmu_attrs,
};

static ssize_t max_precise_show(struct device *cdev,
				struct device_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
}

static DEVICE_ATTR_RO(max_precise);

static struct attribute *x86_pmu_caps_attrs[] = {
	&dev_attr_max_precise.attr,
	NULL
};

static struct attribute_group x86_pmu_caps_group __ro_after_init = {
	.name = "caps",
	.attrs = x86_pmu_caps_attrs,
};

static const struct attribute_group *x86_pmu_attr_groups[] = {
	&x86_pmu_attr_group,
	&x86_pmu_format_group,
	&x86_pmu_events_group,
	&x86_pmu_caps_group,
	NULL,
};

static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	static_call_cond(x86_pmu_sched_task)(ctx, sched_in);
}

static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
				  struct perf_event_context *next)
{
	static_call_cond(x86_pmu_swap_task_ctx)(prev, next);
}

void perf_check_microcode(void)
{
	if (x86_pmu.check_microcode)
		x86_pmu.check_microcode();
}
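/*
 * The rdpmc attribute defined above appears (on non-hybrid systems) as
 * /sys/bus/event_source/devices/cpu/rdpmc. Typical root-only usage:
 *
 *	# echo 0 > /sys/bus/event_source/devices/cpu/rdpmc   (never allow RDPMC)
 *	# echo 1 > /sys/bus/event_source/devices/cpu/rdpmc   (default: allow tasks with a mapped event)
 *	# echo 2 > /sys/bus/event_source/devices/cpu/rdpmc   (always allow; CR4.PCE set everywhere)
 */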
static int x86_pmu_check_period(struct perf_event *event, u64 value)
{
	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
		return -EINVAL;

	if (value && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, value) > value)
			return -EINVAL;
	}

	return 0;
}

static int x86_pmu_aux_output_match(struct perf_event *event)
{
	if (!(pmu.capabilities & PERF_PMU_CAP_AUX_OUTPUT))
		return 0;

	if (x86_pmu.aux_output_match)
		return x86_pmu.aux_output_match(event);

	return 0;
}

static int x86_pmu_filter_match(struct perf_event *event)
{
	if (x86_pmu.filter_match)
		return x86_pmu.filter_match(event);

	return 1;
}

static struct pmu pmu = {
	.pmu_enable		= x86_pmu_enable,
	.pmu_disable		= x86_pmu_disable,

	.attr_groups		= x86_pmu_attr_groups,

	.event_init		= x86_pmu_event_init,

	.event_mapped		= x86_pmu_event_mapped,
	.event_unmapped		= x86_pmu_event_unmapped,

	.add			= x86_pmu_add,
	.del			= x86_pmu_del,
	.start			= x86_pmu_start,
	.stop			= x86_pmu_stop,
	.read			= x86_pmu_read,

	.start_txn		= x86_pmu_start_txn,
	.cancel_txn		= x86_pmu_cancel_txn,
	.commit_txn		= x86_pmu_commit_txn,

	.event_idx		= x86_pmu_event_idx,
	.sched_task		= x86_pmu_sched_task,
	.swap_task_ctx		= x86_pmu_swap_task_ctx,
	.check_period		= x86_pmu_check_period,

	.aux_output_match	= x86_pmu_aux_output_match,

	.filter_match		= x86_pmu_filter_match,
};

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct cyc2ns_data data;
	u64 offset;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_rdpmc =
		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
	userpg->pmc_width = x86_pmu.cntval_bits;

	if (!using_native_sched_clock() || !sched_clock_stable())
		return;

	cyc2ns_read_begin(&data);

	offset = data.cyc2ns_offset + __sched_clock_offset;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always in the local_clock domain.
	 */
	userpg->cap_user_time = 1;
	userpg->time_mult = data.cyc2ns_mul;
	userpg->time_shift = data.cyc2ns_shift;
	userpg->time_offset = offset - now;

	/*
	 * cap_user_time_zero doesn't make sense when we're using a different
	 * time base for the records.
	 */
	if (!event->attr.use_clockid) {
		userpg->cap_user_time_zero = 1;
		userpg->time_zero = offset;
	}

	cyc2ns_read_end();
}
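/*
 * Userspace converts a raw TSC value to sched_clock() time using the
 * time_mult/time_shift/time_offset fields filled in above; the recipe
 * documented in include/uapi/linux/perf_event.h avoids 64x64 overflow:
 *
 *	u64 quot  = tsc >> time_shift;
 *	u64 rem   = tsc & (((u64)1 << time_shift) - 1);
 *	u64 delta = time_offset + quot * time_mult +
 *		    ((rem * time_mult) >> time_shift);
 *
 * With cap_user_time_zero set, substituting time_zero for time_offset in
 * the same calculation yields the absolute perf_clock timestamp for that
 * TSC value.
 */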
/*
 * Determine whether the regs were taken from an irq/exception handler rather
 * than from perf_arch_fetch_caller_regs().
 */
static bool perf_hw_regs(struct pt_regs *regs)
{
	return regs->flags & X86_EFLAGS_FIXED;
}

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (perf_guest_state()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	if (perf_callchain_store(entry, regs->ip))
		return;

	if (perf_hw_regs(regs))
		unwind_start(&state, current, regs, NULL);
	else
		unwind_start(&state, current, NULL, (void *)regs->sp);

	for (; !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || perf_callchain_store(entry, addr))
			return;
	}
}

static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{
	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}

static unsigned long get_segment_base(unsigned int segment)
{
	struct desc_struct *desc;
	unsigned int idx = segment >> 3;

	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/* IRQs are off, so this synchronizes with smp_store_release */
		ldt = READ_ONCE(current->active_mm->context.ldt);
		if (!ldt || idx >= ldt->nr_entries)
			return 0;

		desc = &ldt->entries[idx];
#else
		return 0;
#endif
	} else {
		if (idx >= GDT_ENTRIES)
			return 0;

		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
	}

	return get_desc_base(desc);
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>

static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
	/* 32-bit process in 64-bit kernel. */
	unsigned long ss_base, cs_base;
	struct stack_frame_ia32 frame;
	const struct stack_frame_ia32 __user *fp;

	if (user_64bit_mode(regs))
		return 0;

	cs_base = get_segment_base(regs->cs);
	ss_base = get_segment_base(regs->ss);

	fp = compat_ptr(ss_base + regs->bp);
	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		if (__get_user(frame.next_frame, &fp->next_frame))
			break;
		if (__get_user(frame.return_address, &fp->return_address))
			break;

		perf_callchain_store(entry, cs_base + frame.return_address);
		fp = compat_ptr(ss_base + frame.next_frame);
	}
	pagefault_enable();
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const struct stack_frame __user *fp;

	if (perf_guest_state()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	/*
	 * We don't know what to do with VM86 stacks; ignore them for now.
	 */
	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
		return;

	fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (!nmi_uaccess_okay())
		return;

	if (perf_callchain_user32(regs, entry))
		return;

	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		if (__get_user(frame.next_frame, &fp->next_frame))
			break;
		if (__get_user(frame.return_address, &fp->return_address))
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = (void __user *)frame.next_frame;
	}
	pagefault_enable();
}

/*
 * Deal with code segment offsets for the various execution modes:
 *
 *   VM86 - the good olde 16 bit days, where the linear address is
 *          20 bits and we use regs->ip + 0x10 * regs->cs.
 *
 *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
 *          to figure out what the 32bit base address is.
 *
 *    X32 - has TIF_X32 set, but is running in x86_64
 *
 * X86_64 - CS,DS,SS,ES are all zero based.
 */
static unsigned long code_segment_base(struct pt_regs *regs)
{
	/*
	 * For IA32 we look at the GDT/LDT segment base to convert the
	 * effective IP to a linear address.
	 */
#ifdef CONFIG_X86_32
	/*
	 * If we are in VM86 mode, add the segment offset to convert to a
	 * linear address.
	 */
	if (regs->flags & X86_VM_MASK)
		return 0x10 * regs->cs;

	if (user_mode(regs) && regs->cs != __USER_CS)
		return get_segment_base(regs->cs);
#else
	if (user_mode(regs) && !user_64bit_mode(regs) &&
	    regs->cs != __USER32_CS)
		return get_segment_base(regs->cs);
#endif
	return 0;
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_state())
		return perf_guest_get_ip();

	return regs->ip + code_segment_base(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned int guest_state = perf_guest_state();
	int misc = 0;

	if (guest_state) {
		if (guest_state & PERF_GUEST_USER)
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}

void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	cap->version		= x86_pmu.version;
	/*
	 * KVM doesn't support the hybrid PMU yet.
	 * Return the common value in global x86_pmu,
	 * which is available for all cores.
	 */
	cap->num_counters_gp	= x86_pmu.num_counters;
	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
	cap->bit_width_gp	= x86_pmu.cntval_bits;
	cap->bit_width_fixed	= x86_pmu.cntval_bits;
	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
	cap->events_mask_len	= x86_pmu.events_mask_len;
}
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);