/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
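/*
 * Worked example (illustrative, not from the original source): with
 * 48-bit counters, shift = 16.  If the counter wrapped from
 * prev = 0xFFFFFFFFFFF0 to new = 0x10, the shifted u64 subtraction
 * wraps modulo 2^64 to 0x200000, and "delta >>= 16" yields the
 * correct delta of 0x20 events.  The shift pair also discards any
 * bits a CPU may have sign-extended above the physical counter width.
 */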
/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra msrs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
	u64 val, val_fail, val_new = ~0;
	int i, reg, reg_fail, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (val & (0x03 << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail.  The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */
	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrl_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrl_safe(reg, val);
	ret |= rdmsrl_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
			      reg_fail, val_fail);
	}

	return true;

msr_fail:
	pr_cont("Broken PMU hardware detected, using software events only.\n");
	pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
		boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
		reg, val_new);

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_reserve_hardware(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
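/*
 * Usage sketch (illustrative): callers pair these two helpers around
 * the lifetime of any event that needs the counter MSRs, so the first
 * caller performs the slow, mutex-protected reservation and the last
 * one releases it:
 *
 *	err = x86_reserve_hardware();
 *	if (err)
 *		return err;
 *	...use the PMU...
 *	x86_release_hardware();
 *
 * atomic_inc_not_zero()/atomic_dec_and_mutex_lock() keep the common
 * already-reserved path off the mutex entirely.
 */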
/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	atomic_dec(&x86_pmu.lbr_exclusive[what]);
	atomic_dec(&active_events);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == PERF_TYPE_RAW)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !attr->freq && hwc->sample_period == 1) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;

		/* disallow bts if conflicting events are present */
		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
			return -EBUSY;

		event->destroy = hw_perf_lbr_event_destroy;
	}

	hwc->config |= config;

	return 0;
}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
				precise++;

			if (x86_pmu.pebs_prec_dist)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
	 * whatever the user is asking with attr->branch_sample_type
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * user did not specify branch_sample_type
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
514 */ 515 *br_type = PERF_SAMPLE_BRANCH_ANY; 516 517 if (!event->attr.exclude_user) 518 *br_type |= PERF_SAMPLE_BRANCH_USER; 519 520 if (!event->attr.exclude_kernel) 521 *br_type |= PERF_SAMPLE_BRANCH_KERNEL; 522 } 523 } 524 525 if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) 526 event->attach_state |= PERF_ATTACH_TASK_DATA; 527 528 /* 529 * Generate PMC IRQs: 530 * (keep 'enabled' bit clear for now) 531 */ 532 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; 533 534 /* 535 * Count user and OS events unless requested not to 536 */ 537 if (!event->attr.exclude_user) 538 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; 539 if (!event->attr.exclude_kernel) 540 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; 541 542 if (event->attr.type == PERF_TYPE_RAW) 543 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; 544 545 if (event->attr.sample_period && x86_pmu.limit_period) { 546 if (x86_pmu.limit_period(event, event->attr.sample_period) > 547 event->attr.sample_period) 548 return -EINVAL; 549 } 550 551 return x86_setup_perfctr(event); 552 } 553 554 /* 555 * Setup the hardware configuration for a given attr_type 556 */ 557 static int __x86_pmu_event_init(struct perf_event *event) 558 { 559 int err; 560 561 if (!x86_pmu_initialized()) 562 return -ENODEV; 563 564 err = x86_reserve_hardware(); 565 if (err) 566 return err; 567 568 atomic_inc(&active_events); 569 event->destroy = hw_perf_event_destroy; 570 571 event->hw.idx = -1; 572 event->hw.last_cpu = -1; 573 event->hw.last_tag = ~0ULL; 574 575 /* mark unused */ 576 event->hw.extra_reg.idx = EXTRA_REG_NONE; 577 event->hw.branch_reg.idx = EXTRA_REG_NONE; 578 579 return x86_pmu.hw_config(event); 580 } 581 582 void x86_pmu_disable_all(void) 583 { 584 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 585 int idx; 586 587 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 588 u64 val; 589 590 if (!test_bit(idx, cpuc->active_mask)) 591 continue; 592 rdmsrl(x86_pmu_config_addr(idx), val); 593 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) 594 continue; 595 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 596 wrmsrl(x86_pmu_config_addr(idx), val); 597 } 598 } 599 600 /* 601 * There may be PMI landing after enabled=0. The PMI hitting could be before or 602 * after disable_all. 603 * 604 * If PMI hits before disable_all, the PMU will be disabled in the NMI handler. 605 * It will not be re-enabled in the NMI handler again, because enabled=0. After 606 * handling the NMI, disable_all will be called, which will not change the 607 * state either. If PMI hits after disable_all, the PMU is already disabled 608 * before entering NMI handler. The NMI handler will not change the state 609 * either. 610 * 611 * So either situation is harmless. 
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define	SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events	= num;
	sched->max_weight	= wmax;
	sched->max_gp		= gpmax;
	sched->constraints	= constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event	= idx;		/* start with min weight */
	sched->state.weight	= wmin;
	sched->state.unassigned	= num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* continue with next counter: */
	clear_bit(sched->state.counter++, sched->state.used);

	return true;
}
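/*
 * Illustrative walk-through (not from the original source): given two
 * events where event A may only use counter 0 (weight 1) and event B
 * may use counters 0-1 (weight 2), the min-weight-first order places
 * A before B, so the most constrained events are never starved by
 * flexible ones.  When a constraint is marked ->overlap, the state is
 * saved above so that a later failed placement can backtrack and
 * retry the overlapping event on its next permitted counter.
 */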
/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			if (!__test_and_set_bit(idx, sched->state.used))
				goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		if (!__test_and_set_bit(idx, sched->state.used)) {
			if (sched->state.nr_gp++ >= sched->max_gp)
				return false;

			goto done;
		}
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c;
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct perf_event *e;
	int i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		cpuc->event_constraint[i] = NULL;
		c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
		cpuc->event_constraint[i] = c;

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = x86_pmu.num_counters;

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of the sibling thread
		 * by ensuring at most half the counters cannot be in
		 * exclusive mode. There are no designated counters for the
		 * limits. Any N/2 counters can be used. This helps with
		 * events with specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled
	 *
	 * We invoke the lower level commit callback to lock the resource
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL)
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			e->hw.flags |= PERF_X86_EVENT_COMMITTED;
			if (x86_pmu.commit_scheduling)
				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
		}
	} else {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			/*
			 * do not put_constraint() on committed events,
			 * because they are good to go
			 */
			if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
				continue;

			/*
			 * release events that failed scheduling
			 */
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, e);
		}
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);

	return unsched ? -EINVAL : 0;
}
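/*
 * Example (illustrative): on a part with 4 generic counters and the
 * HT erratum workaround active, gpmax drops to 2, so at most two of
 * this thread's events can occupy generic counters at once and the
 * sibling thread keeps a fair share.  Which two counters are used is
 * not fixed; any N/2 of them satisfy the limit.
 */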
/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
	    local64_read(&hwc->prev_count) != (u64)-left) {
		/*
		 * The hw event starts counting from this event offset,
		 * mark it to be able to extract future deltas:
		 */
		local64_set(&hwc->prev_count, (u64)-left);

		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
	}

	/*
	 * Due to an erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}
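/*
 * Worked example (illustrative): to make a 48-bit counter overflow
 * after left = 1000 increments, the counter is programmed with
 * (u64)-1000 & x86_pmu.cntval_mask == 0xFFFFFFFFFC18, i.e. it counts
 * up from just below the overflow point and raises the PMI once 1000
 * events have elapsed.
 */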
void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	/*
	 * Commit the collect_events() state. See x86_pmu_del() and
	 * x86_pmu_*_txn().
	 */
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	ret = 0;
out:
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs, debugctl;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		if (x86_pmu.pebs_constraints) {
			rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
			pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
		}
		if (x86_pmu.lbr_nr) {
			rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
			pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
		}
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
		rdmsrl(x86_pmu_event_addr(idx), pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int i;

	/*
	 * event is descheduled
	 */
	event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;

	/*
	 * If we're called during a txn, we don't need to do anything.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 *
	 * XXX assumes any ->del() called during a TXN will only be on
	 * an event added during that same TXN.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		return;

	/*
	 * Not a TXN, therefore cleanup properly.
	 */
	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i])
			break;
	}

	if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
		return;

	/* If we have a newly added event; make sure to decrease n_added. */
	if (i >= cpuc->n_events - cpuc->n_added)
		--cpuc->n_added;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(cpuc, event);

	/* Delete the array entry. */
	while (++i < cpuc->n_events) {
		cpuc->event_list[i-1] = cpuc->event_list[i];
		cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
	}
	--cpuc->n_events;

	perf_event_update_userpage(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This generic handler doesn't seem to have any issues where the
	 * unmasking occurs so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter, some CPUs
			 * might still deliver spurious interrupts that
			 * are still in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 start_clock;
	u64 finish_clock;
	int ret;

	/*
	 * All PMUs/events that share this PMI handler should make sure to
	 * increment active_events for their events.
	 */
	if (!atomic_read(&active_events))
		return NMI_DONE;

	start_clock = sched_clock();
	ret = x86_pmu.handle_irq(regs);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);

	return ret;
}
NOKPROBE_SYMBOL(perf_event_nmi_handler);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i, ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
			cpuc->kfree_on_online[i] = NULL;
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_ONLINE:
		for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
			kfree(cpuc->kfree_on_online[i]);
			cpuc->kfree_on_online[i] = NULL;
		}
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");

	/*
	 * If we have a PMU initialized but no APIC
	 * interrupts, we cannot sample hardware
	 * events (user-space has to fall back and
	 * sample via a hrtimer based software event):
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

}

static struct attribute_group x86_pmu_format_group = {
	.name = "format",
	.attrs = NULL,
};
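/*
 * The format group is filled in later from x86_pmu.format_attrs and
 * surfaces the config bit-field layout under sysfs.  For example
 * (typical, model-dependent), /sys/bus/event_source/devices/cpu/format/event
 * contains a spec such as "config:0-7" that tools use to encode named
 * event fields into attr.config.
 */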
/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * out of events_attr attributes.
 */
static void __init filter_events(struct attribute **attrs)
{
	struct device_attribute *d;
	struct perf_pmu_events_attr *pmu_attr;
	int offset = 0;
	int i, j;

	for (i = 0; attrs[i]; i++) {
		d = (struct device_attribute *)attrs[i];
		pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
		/* str trumps id */
		if (pmu_attr->event_str)
			continue;
		if (x86_pmu.event_map(i + offset))
			continue;

		for (j = i; attrs[j]; j++)
			attrs[j] = attrs[j + 1];

		/* Check the shifted attr. */
		i--;

		/*
		 * event_map() is index based, the attrs array is organized
		 * by increasing event index. If we shift the events, then
		 * we need to compensate for the event_map(), otherwise
		 * we are looking up the wrong event in the map
		 */
		offset++;
	}
}

/* Merge two pointer arrays */
__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
{
	struct attribute **new;
	int j, i;

	for (j = 0; a[j]; j++)
		;
	for (i = 0; b[i]; i++)
		j++;
	j++;

	new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
	if (!new)
		return NULL;

	j = 0;
	for (i = 0; a[i]; i++)
		new[j++] = a[i];
	for (i = 0; b[i]; i++)
		new[j++] = b[i];
	new[j] = NULL;

	return new;
}

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr = \
		container_of(attr, struct perf_pmu_events_attr, attr);
	u64 config = x86_pmu.event_map(pmu_attr->id);

	/* string trumps id */
	if (pmu_attr->event_str)
		return sprintf(page, "%s", pmu_attr->event_str);

	return x86_pmu.events_sysfs_show(page, config);
}
EXPORT_SYMBOL_GPL(events_sysfs_show);

EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
EVENT_ATTR(instructions,		INSTRUCTIONS		);
EVENT_ATTR(cache-references,		CACHE_REFERENCES	);
EVENT_ATTR(cache-misses,		CACHE_MISSES		);
EVENT_ATTR(branch-instructions,		BRANCH_INSTRUCTIONS	);
EVENT_ATTR(branch-misses,		BRANCH_MISSES		);
EVENT_ATTR(bus-cycles,			BUS_CYCLES		);
EVENT_ATTR(stalled-cycles-frontend,	STALLED_CYCLES_FRONTEND	);
EVENT_ATTR(stalled-cycles-backend,	STALLED_CYCLES_BACKEND	);
EVENT_ATTR(ref-cycles,			REF_CPU_CYCLES		);

static struct attribute *empty_attrs;

static struct attribute *events_attr[] = {
	EVENT_PTR(CPU_CYCLES),
	EVENT_PTR(INSTRUCTIONS),
	EVENT_PTR(CACHE_REFERENCES),
	EVENT_PTR(CACHE_MISSES),
	EVENT_PTR(BRANCH_INSTRUCTIONS),
	EVENT_PTR(BRANCH_MISSES),
	EVENT_PTR(BUS_CYCLES),
	EVENT_PTR(STALLED_CYCLES_FRONTEND),
	EVENT_PTR(STALLED_CYCLES_BACKEND),
	EVENT_PTR(REF_CPU_CYCLES),
	NULL,
};

static struct attribute_group x86_pmu_events_group = {
	.name = "events",
	.attrs = events_attr,
};
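/*
 * Reading one of these attributes yields the event encoding that
 * x86_event_sysfs_show() below renders.  For example (illustrative,
 * for an Intel part), "cat /sys/bus/event_source/devices/cpu/events/cpu-cycles"
 * might print "event=0x3c", which tools such as perf parse back into
 * attr.config using the format/ descriptions.
 */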
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{
	u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
	bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
	bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
	bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
	bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
	ssize_t ret;

	/*
	 * We have a whole page to spend and only a little data
	 * to write, so we can safely use sprintf.
	 */
	ret = sprintf(page, "event=0x%02llx", event);

	if (umask)
		ret += sprintf(page + ret, ",umask=0x%02llx", umask);

	if (edge)
		ret += sprintf(page + ret, ",edge");

	if (pc)
		ret += sprintf(page + ret, ",pc");

	if (any)
		ret += sprintf(page + ret, ",any");

	if (inv)
		ret += sprintf(page + ret, ",inv");

	if (cmask)
		ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);

	ret += sprintf(page + ret, "\n");

	return ret;
}

static int __init init_hw_perf_events(void)
{
	struct x86_pmu_quirk *quirk;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		err = -ENOTSUPP;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return 0;
	}

	pmu_check_apic();

	/* sanity check that the hardware exists or is emulated */
	if (!check_hw_exists())
		return 0;

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */

	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
		quirk->func();

	if (!x86_pmu.intel_ctrl)
		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	perf_events_lapic_init();
	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
				   0, x86_pmu.num_counters, 0, 0);

	x86_pmu_format_group.attrs = x86_pmu.format_attrs;

	if (x86_pmu.event_attrs)
		x86_pmu_events_group.attrs = x86_pmu.event_attrs;

	if (!x86_pmu.events_sysfs_show)
		x86_pmu_events_group.attrs = &empty_attrs;
	else
		filter_events(x86_pmu_events_group.attrs);

	if (x86_pmu.cpu_events) {
		struct attribute **tmp;

		tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
		if (!WARN_ON(!tmp))
			x86_pmu_events_group.attrs = tmp;
	}

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(x86_pmu_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event);
}
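/*
 * Transaction flow (sketch of how the perf core drives the three
 * callbacks below when scheduling an event group):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	pmu->add(event1, PERF_EF_START);	// collected, not yet scheduled
 *	pmu->add(event2, PERF_EF_START);
 *	if (pmu->commit_txn(pmu))		// group-wide schedulability test
 *		pmu->cancel_txn(pmu);		// roll back n_added/n_events
 */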
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuc->txn_flags);		/* txn already in flight */

	cpuc->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	__this_cpu_write(cpu_hw_events.n_txn, 0);
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */

	txn_flags = cpuc->txn_flags;
	cpuc->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	/*
	 * Truncate collected array by the number of events added in this
	 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
	 */
	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Returns 0 on success
 *
 * Does not cancel the transaction on failure; expects the caller to do this.
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int assign[X86_PMC_IDX_MAX];
	int n, ret;

	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */

	if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuc->txn_flags = 0;
		return 0;
	}

	n = cpuc->n_events;

	if (!x86_pmu_initialized())
		return -EAGAIN;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;

	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}
/*
 * a fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra reg may conflict without the kernel being
 * able to catch this when the last event gets added to
 * the group.
 */
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
	kfree(cpuc->shared_regs);
	kfree(cpuc);
}

static struct cpu_hw_events *allocate_fake_cpuc(void)
{
	struct cpu_hw_events *cpuc;
	int cpu = raw_smp_processor_id();

	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
	if (!cpuc)
		return ERR_PTR(-ENOMEM);

	/* only needed, if we have extra_regs */
	if (x86_pmu.extra_regs) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			goto error;
	}
	cpuc->is_fake = 1;
	return cpuc;
error:
	free_fake_cpuc(cpuc);
	return ERR_PTR(-ENOMEM);
}

/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = allocate_fake_cpuc();
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);

	c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);

	if (!c || !c->weight)
		ret = -EINVAL;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	free_fake_cpuc(fake_cpuc);

	return ret;
}

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret = -EINVAL, n;

	fake_cpuc = allocate_fake_cpuc();
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;

	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out:
	free_fake_cpuc(fake_cpuc);
	return ret;
}

static int x86_pmu_event_init(struct perf_event *event)
{
	struct pmu *tmp;
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	err = __x86_pmu_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
	}

	if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
		event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;

	return err;
}

static void refresh_pce(void *ignored)
{
	if (current->mm)
		load_mm_cr4(current->mm);
}
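/*
 * Mapping/unmapping the event's ring buffer toggles userspace RDPMC
 * access for the whole mm: the first mmap of an RDPMC-allowed event
 * bumps mm->context.perf_rdpmc_allowed from 0 to 1 and sets CR4.PCE
 * on every CPU currently running this mm (via refresh_pce above); the
 * last munmap clears it again.  A self-monitoring task can then read
 * the counter directly with RDPMC, using the index published in the
 * mmapped perf_event_mmap_page.
 */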
static void x86_pmu_event_mapped(struct perf_event *event)
{
	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}

static void x86_pmu_event_unmapped(struct perf_event *event)
{
	if (!current->mm)
		return;

	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}

static int x86_pmu_event_idx(struct perf_event *event)
{
	int idx = event->hw.idx;

	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return 0;

	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
		idx -= INTEL_PMC_IDX_FIXED;
		idx |= 1 << 30;
	}

	return idx + 1;
}

static ssize_t get_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      char *buf)
{
	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}

static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val > 2)
		return -EINVAL;

	if (x86_pmu.attr_rdpmc_broken)
		return -ENOTSUPP;

	if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
		/*
		 * Changing into or out of always available, aka
		 * perf-event-bypassing mode. This path is extremely slow,
		 * but only root can trigger it, so it's okay.
		 */
		if (val == 2)
			static_key_slow_inc(&rdpmc_always_available);
		else
			static_key_slow_dec(&rdpmc_always_available);
		on_each_cpu(refresh_pce, NULL, 1);
	}

	x86_pmu.attr_rdpmc = val;

	return count;
}

static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);

static struct attribute *x86_pmu_attrs[] = {
	&dev_attr_rdpmc.attr,
	NULL,
};

static struct attribute_group x86_pmu_attr_group = {
	.attrs = x86_pmu_attrs,
};

static const struct attribute_group *x86_pmu_attr_groups[] = {
	&x86_pmu_attr_group,
	&x86_pmu_format_group,
	&x86_pmu_events_group,
	NULL,
};

static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (x86_pmu.sched_task)
		x86_pmu.sched_task(ctx, sched_in);
}

void perf_check_microcode(void)
{
	if (x86_pmu.check_microcode)
		x86_pmu.check_microcode();
}
EXPORT_SYMBOL_GPL(perf_check_microcode);

static struct pmu pmu = {
	.pmu_enable		= x86_pmu_enable,
	.pmu_disable		= x86_pmu_disable,

	.attr_groups		= x86_pmu_attr_groups,

	.event_init		= x86_pmu_event_init,

	.event_mapped		= x86_pmu_event_mapped,
	.event_unmapped		= x86_pmu_event_unmapped,

	.add			= x86_pmu_add,
	.del			= x86_pmu_del,
	.start			= x86_pmu_start,
	.stop			= x86_pmu_stop,
	.read			= x86_pmu_read,

	.start_txn		= x86_pmu_start_txn,
	.cancel_txn		= x86_pmu_cancel_txn,
	.commit_txn		= x86_pmu_commit_txn,

	.event_idx		= x86_pmu_event_idx,
	.sched_task		= x86_pmu_sched_task,
	.task_ctx_size		= sizeof(struct x86_perf_task_context),
};
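/*
 * Sketch of how userspace consumes the fields filled in below (per
 * the conversion documented for perf_event_mmap_page): with
 * cap_user_time set, a TSC delta "cyc" converts to nanoseconds as
 *
 *	quot  = cyc >> time_shift;
 *	rem   = cyc & (((u64)1 << time_shift) - 1);
 *	delta = time_offset + quot * time_mult +
 *		((rem * time_mult) >> time_shift);
 *
 * and with cap_user_time_zero set, time_zero anchors that scale to
 * absolute event timestamps.
 */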
void
arch_perf_update_userpage(struct perf_event *event,
			  struct perf_event_mmap_page *userpg, u64 now)
{
	struct cyc2ns_data *data;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_rdpmc =
		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
	userpg->pmc_width = x86_pmu.cntval_bits;

	if (!sched_clock_stable())
		return;

	data = cyc2ns_read_begin();

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always in the local_clock domain.
	 */
	userpg->cap_user_time = 1;
	userpg->time_mult = data->cyc2ns_mul;
	userpg->time_shift = data->cyc2ns_shift;
	userpg->time_offset = data->cyc2ns_offset - now;

	/*
	 * cap_user_time_zero doesn't make sense when we're using a different
	 * time base for the records.
	 */
	if (event->clock == &local_clock) {
		userpg->cap_user_time_zero = 1;
		userpg->time_zero = data->cyc2ns_offset;
	}

	cyc2ns_read_end(data);
}

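/*
 * Userspace converts a TSC delta to nanoseconds with the mult/shift
 * pair published above; a sketch of the conversion documented for
 * struct perf_event_mmap_page ('cyc' is an rdtsc() reading):
 *
 *	u64 quot = cyc >> time_shift;
 *	u64 rem  = cyc & (((u64)1 << time_shift) - 1);
 *	u64 time = time_offset + quot * time_mult +
 *		   ((rem * time_mult) >> time_shift);
 *
 * The quot/rem split avoids overflowing the 64-bit multiply for
 * large cycle counts.
 */
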
/*
 * callchain support
 */

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static int backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	return perf_callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}

static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{
	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}

static unsigned long get_segment_base(unsigned int segment)
{
	struct desc_struct *desc;
	int idx = segment >> 3;

	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/* idx is zero based, so idx == LDT_ENTRIES is out of range */
		if (idx >= LDT_ENTRIES)
			return 0;

		/* IRQs are off, so this synchronizes with smp_store_release */
		ldt = lockless_dereference(current->active_mm->context.ldt);
		if (!ldt || idx >= ldt->size)
			return 0;

		desc = &ldt->entries[idx];
#else
		return 0;
#endif
	} else {
		if (idx >= GDT_ENTRIES)
			return 0;

		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
	}

	return get_desc_base(desc);
}

#ifdef CONFIG_IA32_EMULATION

#include <asm/compat.h>

static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	/* 32-bit process in 64-bit kernel. */
	unsigned long ss_base, cs_base;
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	cs_base = get_segment_base(regs->cs);
	ss_base = get_segment_base(regs->ss);

	fp = compat_ptr(ss_base + regs->bp);
	pagefault_disable();
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = 0;
		frame.return_address = 0;

		if (!access_ok(VERIFY_READ, fp, 8))
			break;

		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
		if (bytes != 0)
			break;
		bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
		if (bytes != 0)
			break;

		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		perf_callchain_store(entry, cs_base + frame.return_address);
		fp = compat_ptr(ss_base + frame.next_frame);
	}
	pagefault_enable();
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const void __user *fp;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest OS callchains yet */
		return;
	}

	/*
	 * We don't know what to do with VM86 stacks; ignore them for now.
	 */
	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
		return;

	fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (!current->mm)
		return;

	if (perf_callchain_user32(regs, entry))
		return;

	pagefault_disable();
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = NULL;
		frame.return_address = 0;

		if (!access_ok(VERIFY_READ, fp, 16))
			break;

		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
		if (bytes != 0)
			break;
		bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
		if (bytes != 0)
			break;

		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = (void __user *)frame.next_frame;
	}
	pagefault_enable();
}

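/*
 * Both user-stack walks above assume the classic frame-pointer
 * layout, i.e. (as defined in asm/stacktrace.h) something like:
 *
 *	struct stack_frame {
 *		struct stack_frame	*next_frame;
 *		unsigned long		return_address;
 *	};
 *
 * hence the 16-byte (two 8-byte fields) access_ok() check on 64-bit
 * and the 8-byte one in the compat walker.  Code built with
 * -fomit-frame-pointer breaks the chain, so the walk simply
 * terminates early there.
 */
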
/*
 * Deal with code segment offsets for the various execution modes:
 *
 *   VM86   - the good old 16-bit days, where the linear address is
 *            20 bits and we use regs->ip + 0x10 * regs->cs.
 *
 *   IA32   - where we need to look at GDT/LDT segment descriptor tables
 *            to figure out what the 32-bit base address is.
 *
 *   X32    - has TIF_X32 set, but is running in x86_64 mode.
 *
 *   X86_64 - CS, DS, SS and ES are all zero based.
 */
static unsigned long code_segment_base(struct pt_regs *regs)
{
	/*
	 * For IA32 we look at the GDT/LDT segment base to convert the
	 * effective IP to a linear address.
	 */
#ifdef CONFIG_X86_32
	/*
	 * If we are in VM86 mode, add the segment offset to convert to a
	 * linear address.
	 */
	if (regs->flags & X86_VM_MASK)
		return 0x10 * regs->cs;

	if (user_mode(regs) && regs->cs != __USER_CS)
		return get_segment_base(regs->cs);
#else
	if (user_mode(regs) && !user_64bit_mode(regs) &&
	    regs->cs != __USER32_CS)
		return get_segment_base(regs->cs);
#endif
	return 0;
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return regs->ip + code_segment_base(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}

void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	cap->version		= x86_pmu.version;
	cap->num_counters_gp	= x86_pmu.num_counters;
	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
	cap->bit_width_gp	= x86_pmu.cntval_bits;
	cap->bit_width_fixed	= x86_pmu.cntval_bits;
	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
	cap->events_mask_len	= x86_pmu.events_mask_len;
}
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
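/*
 * Example consumer (a sketch, not a definitive interface contract):
 * KVM uses this export to size a guest's virtual PMU.  A caller
 * might do, with MY_MAX_GP being its own limit:
 *
 *	struct x86_pmu_capability cap;
 *
 *	perf_get_x86_pmu_capability(&cap);
 *	nr_gp = min_t(int, cap.num_counters_gp, MY_MAX_GP);
 *	mask  = (1ULL << cap.bit_width_gp) - 1;
 *
 * so that writes to guest counters can be clamped to what the host
 * hardware actually implements.
 */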