/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. The callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters; code for
	 * the different CPUs uses this copy differently, and some may not
	 * use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
#define T
#define V
#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
					 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)		((vpe) << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		(filter)
#endif /* CONFIG_CPU_BMIPS5000 */

#define M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
					 MIPS_PERFCTRL_K |		\
					 MIPS_PERFCTRL_U |		\
					 MIPS_PERFCTRL_S |		\
					 MIPS_PERFCTRL_IE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
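
/*
 * On cores where the counters are shared between two VPEs (see
 * MIPS_TCS_PER_COUNTER), each VPE effectively owns half of the four
 * counters: VPE 0 uses counters 0-1 and VPE 1 uses counters 2-3.
 * Rotate the index so that each VPE addresses its own pair as
 * counters 0 and 1.
 */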
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned; we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the
		 * counter that an event of the latter kind wants to use,
		 * the "counter allocation" for the latter event fails.
		 * If counters could be swapped dynamically, both events
		 * could be satisfied, but we leave this issue alone for
		 * now.
		 */
		if (test_bit(i, &cntr_mask) &&
		    !test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		MIPS_PERFCTRL_IE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
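
/*
 * Program the hardware counter so that it overflows (and raises its
 * interrupt) after "left" more events, where "left" is the remainder
 * of the current sample period, clamped to what the counter can hold.
 */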
static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
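
/*
 * Fold the delta since the last read of the hardware counter into the
 * generic event count. The cmpxchg loop guards against racing with a
 * concurrent update of prev_count for the same event.
 */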
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross-CPU calls. on_each_cpu() can help us, but we
 * cannot make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised
 * while we own the write lock, simply pause local counters on that CPU
 * and spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
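
/*
 * Hook up the performance counter interrupt: either a dedicated IRQ
 * line, or the CP0 interrupt shared with the timer, in which case we
 * interpose on perf_irq and restore the old handler on free.
 */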
static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they
 * have their own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				      &pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			    (void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
	/*
	 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits
	 * for event_id.
	 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}
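
/*
 * The HW_CACHE config follows the generic perf ABI layout: cache type
 * in bits 7:0, operation in bits 15:8 and result in bits 23:16.
 */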
static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}
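
/*
 * Probe the number of counters by walking the control registers: the
 * M bit of perfctrlN is set when counter N+1 exists.
 */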
static int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		/* fall through */
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		/* fall through */
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		/* fall through */
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
	/* These only count dcache, not icache */
	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
},
/*
 * 74K core does not have specific DTLB events. proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 * not included here.
 * One can use raw events if really needed.
 */
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event i6x00_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
	},
},
[C(DTLB)] = {
	/* Can't distinguish read & write */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Conditional branches / mispredicted */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
	},
},
};

static const struct mips_perf_event loongson3_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif
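
/*
 * Translate a generic perf event into a MIPS event descriptor and
 * pre-compute the control register bits (mode exclusion and, on MT
 * cores, the counting range) that do not depend on which counter the
 * event eventually lands on.
 */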
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Return the MIPS event descriptor for this generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by the single CPU operates (the mode exclusion and
	 * the range).
	 */
	hwc->config_base = MIPS_PERFCTRL_IE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= MIPS_PERFCTRL_U;
	if (!attr->exclude_kernel) {
		hwc->config_base |= MIPS_PERFCTRL_K;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= MIPS_PERFCTRL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= MIPS_PERFCTRL_S;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period	= mipspmu.max_period;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}
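
/*
 * Stop all counters on the local CPU/VPE, saving the control registers
 * so that resume_local_counters() can restart them exactly as they
 * were.
 */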
static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * For most cores the user can use 0-255 raw events, where 0-127 are
 * for the events of even counters and 128-255 are for odd counters.
 * Note that bit 7 is used to indicate the even/odd bank selector. So,
 * for example, when a user wants Event Num 15 on an odd counter (by
 * referring to the core's user manual), 128 must be added to 15,
 * giving 143 (0x8f) as the event config.
 *
 * Some newer cores have even more events, in which case the user can
 * use raw events 0-511, where 0-255 are for the events of even
 * counters and 256-511 are for odd counters, so bit 8 is used to
 * indicate the even/odd bank selector.
 */
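/*
 * With the perf tool this maps onto the usual rNN raw-event syntax;
 * e.g. "perf stat -e r8f" requests Event Num 15 on an odd counter
 * (0x0f | 0x80 == 0x8f).
 */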
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	/* currently most cores have 7-bit event numbers */
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
	case CPU_1074K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_PROAPTIV:
		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_P5600:
	case CPU_P6600:
		/* 8-bit event numbers */
		raw_id = config & 0x1ff;
		base_id = raw_id & 0xff;
		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_I6400:
	case CPU_I6500:
		/* 8-bit event numbers */
		base_id = config & 0xff;
		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_INTERAPTIV:
		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	case CPU_LOONGSON3:
		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	}

	raw_event.event_id = base_id;

	return &raw_event;
}
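
/*
 * Octeon raw events live in a single bank; just validate the event
 * number against the limits of the particular Octeon family and
 * reject the holes that are not implemented.
 */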
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}
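
/*
 * Probe the PMU at early boot: count the available counters, pick the
 * IRQ and the per-CPU event tables, and size the counters as 32- or
 * 64-bit based on the W bit of perfctrl0.
 */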
static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

	if (get_c0_perfcount_int)
		irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_PROAPTIV:
		mipspmu.name = "mips/proAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P5600:
		mipspmu.name = "mips/P5600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P6600:
		mipspmu.name = "mips/P6600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_I6400:
		mipspmu.name = "mips/I6400";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_I6500:
		mipspmu.name = "mips/I6500";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_1074K:
		mipspmu.name = "mips/1074K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_INTERAPTIV:
		mipspmu.name = "mips/interAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON3:
		mipspmu.name = "mips/loongson3";
		mipspmu.general_event_map = &loongson3_event_map;
		mipspmu.cache_event_map = &loongson3_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or this support is not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (shared with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);