#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
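
/*
 * Worked example of the period bookkeeping above (illustrative only;
 * assume a 32-bit counter, i.e. max_period == 0xffffffff):
 *
 * armpmu_event_set_period() with left == 0x1000 stores (u64)-left in
 * prev_count and programs the counter with the truncated value
 * 0xfffff000, so the hardware overflows after 0x1000 increments.
 *
 * If armpmu_event_update() later reads new_raw_count == 0xfffff800,
 * then delta = (new_raw_count - prev_raw_count) & max_period == 0x800:
 * the number of events counted since the counter was programmed, which
 * is added to event->count and subtracted from period_left.
 */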

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}
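
/*
 * Rough call sequence from the perf core (see the struct pmu callbacks
 * documented in include/linux/perf_event.h): an event is placed on a
 * counter with ->add(PERF_EF_START) and removed with ->del(); while it
 * is scheduled in, it may be paused with ->stop(PERF_EF_UPDATE) and
 * resumed with ->start(PERF_EF_RELOAD). The helpers above map those
 * hooks onto the arm_pmu callbacks (get_event_idx, enable, disable...).
 */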

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}
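
/*
 * Note that the handler runtime measured above is fed back to the perf
 * core, which uses it to throttle the maximum sample rate if PMU
 * interrupt handling starts consuming more CPU time than allowed by
 * the kernel.perf_cpu_time_max_percent sysctl.
 */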

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}
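
/*
 * armpmu_event_init() is what the perf core invokes when a new event is
 * created (e.g. via the perf_event_open() syscall). Returning -ENOENT
 * here is not fatal: the core will go on to offer the event to the
 * other registered PMUs, which is what allows several CPU PMUs (e.g.
 * one per cluster on a big.LITTLE system) to coexist.
 */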

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();

	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!irq)
		return 0;

	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
			goto err_out;
		}
	} else {
		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		if (platdata && platdata->irq_flags) {
			irq_flags = platdata->irq_flags;
		} else {
			irq_flags = IRQF_PERCPU |
				    IRQF_NOBALANCING |
				    IRQF_NO_THREAD;
		}

		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err)
		goto err_out;

	cpumask_set_cpu(cpu, &armpmu->active_irqs);
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}
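
/*
 * The IRQ handling above distinguishes two wirings: a per-CPU interrupt
 * (PPI) is requested once with request_percpu_irq() and shared by all
 * supported CPUs, while per-CPU SPIs are requested individually with
 * request_irq() and pinned to their CPU with irq_force_affinity() so
 * that overflow interrupts are always taken on the CPU that owns the
 * counters.
 */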

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		enable_percpu_irq(irq, IRQ_TYPE_NONE);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		disable_percpu_irq(irq);

	return 0;
}
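
/*
 * Deep idle states (CPU PM) can power the core, and with it the PMU,
 * completely down, just like hotplug does. The notifier below stops
 * and saves the counters on CPU_PM_ENTER and resets and reprograms
 * them on CPU_PM_EXIT so that active events survive idle states that
 * lose PMU context.
 */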
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* Fall through: restart the PMU after restoring the counters. */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}
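
/*
 * A minimal sketch of how a CPU PMU back-end is expected to use the
 * helpers below (the back-end names are illustrative, not prescribed
 * by this file):
 *
 *	pmu = armpmu_alloc();
 *	pmu->name       = "armv8_pmuv3";
 *	pmu->handle_irq = armv8pmu_handle_irq;
 *	...fill in enable/disable/read_counter/etc...
 *	armpmu_request_irqs(pmu);
 *	armpmu_register(pmu);
 *
 * with armpmu_free_irqs()/armpmu_free() undoing the above on failure.
 */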
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);