#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
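	 * For example, with a 32-bit counter (max_period == 0xffffffff) the
	 * longest period that actually gets programmed is 0x7fffffff events,
	 * leaving the other half of the counter range as headroom before the
	 * overflow interrupt must be handled.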
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/*
	 * An event following a task can't be rejected at init time, so
	 * reject it here if it has been scheduled onto a CPU that this
	 * PMU doesn't support.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/*
	 * If we don't have space for the counter then finish early.
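	 * get_event_idx() hands back a free counter index, or a negative
	 * error code when every counter is already in use.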
	 */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
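	 * (Concretely, dev points at this CPU's hw_events->percpu_pmu slot,
	 * which was set up to hold the struct arm_pmu pointer at init time.)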
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles.
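	 * (For example, in a big.LITTLE system an event opened on the "big"
	 * cluster's PMU is rejected here if event->cpu points at a "LITTLE"
	 * CPU.)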
	 * Process-following events (where event->cpu == -1) can be migrated
	 * between CPUs, and thus we have to reject them later (in armpmu_add)
	 * if they're scheduled on a different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
		.filter_match = armpmu_filter_match,
	};
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
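 * Both report on whichever CPU PMU was registered first (tracked in
 * __oprofile_cpu_pmu above); on a heterogeneous system this may not
 * describe every core's PMU.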
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't
			 * shift, assume that we're running on a uniprocessor
			 * machine and carry on using it. Otherwise, skip this
			 * interrupt and move on to the next one.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
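 * The reset is done from the CPU_STARTING notifier below, i.e. it runs on
 * the incoming CPU itself.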
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return NOTIFY_DONE;

	if (pmu->reset)
		pmu->reset(pmu);
	else
		return NOTIFY_DONE;

	return NOTIFY_OK;
}

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used, skip it; there is no
		 * need to stop/restart it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 *	perf_event_update_userpage()
			 *
			 * which requires RCU read locking to be functional.
			 * Wrap the call in RCU_NONIDLE() to make the RCU
			 * subsystem aware that this CPU is not idle from an
			 * RCU perspective for the duration of the
			 * armpmu_start() call.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
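	 * (The CPU may come back from a deep idle state with the PMU
	 * registers in their UNKNOWN reset state, just as on hotplug.)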
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* fall through */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
	if (err)
		goto out_hw_events;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events = cpu_hw_events;
	cpu_pmu->request_irq = cpu_pmu_request_irq;
	cpu_pmu->free_irq = cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
				 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
	 * sharing into account (e.g. with our pmu::filter_match callback and
	 * pmu::event_init group validation).
	 */
	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

	return 0;

out_unregister:
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
out_hw_events:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}

/*
 * CPU PMU identification and probing.
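 *
 * A PMU is matched either from the devicetree (via the of_device_id table
 * passed to arm_pmu_device_probe()) or, failing that, by comparing the CPUID
 * register against a table of struct pmu_probe_info entries.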
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
	int *irqs, i = 0;
	bool using_spi = false;
	struct platform_device *pdev = pmu->plat_device;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	do {
		struct device_node *dn;
		int cpu, irq;

		/* See if we have an affinity entry */
		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
		if (!dn)
			break;

		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
		irq = platform_get_irq(pdev, i);
		if (irq >= 0) {
			bool spi = !irq_is_percpu(irq);

			if (i > 0 && spi != using_spi) {
				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
				       dn->name);
				kfree(irqs);
				return -EINVAL;
			}

			using_spi = spi;
		}

		/* Now look up the logical CPU number */
		for_each_possible_cpu(cpu) {
			struct device_node *cpu_dn;

			cpu_dn = of_cpu_device_node_get(cpu);
			of_node_put(cpu_dn);

			if (dn == cpu_dn)
				break;
		}

		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			cpumask_setall(&pmu->supported_cpus);
			break;
		}
		of_node_put(dn);

		/* For SPIs, we need to track the affinity per IRQ */
		if (using_spi) {
			if (i >= pdev->num_resources)
				break;

			irqs[i] = cpu;
		}

		/* Keep track of the CPUs containing this PMU type */
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		i++;
	} while (1);

	/* If we didn't manage to parse anything, claim to support all CPUs */
	if (cpumask_weight(&pmu->supported_cpus) == 0)
		cpumask_setall(&pmu->supported_cpus);

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)
		pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}
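
/*
 * An "interrupt-affinity" property (parsed by of_pmu_irq_cfg() above) pairs
 * each per-CPU SPI with the CPU it is wired to. A hypothetical node might
 * look like this (labels and interrupt specifiers are illustrative only):
 *
 *	pmu {
 *		compatible = "arm,cortex-a15-pmu";
 *		interrupts = <0 68 4>, <0 69 4>;
 *		interrupt-affinity = <&cpu0>, <&cpu1>;
 *	};
 */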

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	armpmu_init(pmu);

	pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = of_pmu_irq_cfg(pmu);
		if (!ret)
			ret = init_fn(pmu);
	} else {
		ret = probe_current_pmu(pmu, probe_table);
		cpumask_setall(&pmu->supported_cpus);
	}

	if (ret) {
		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
		goto out_free;
	}

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("%s: failed to register PMU devices!\n",
		of_node_full_name(node));
	kfree(pmu->irq_affinity);
	kfree(pmu);
	return ret;
}