/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/hwrpb.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>


/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int			enabled;
	/* Number of events scheduled; also the number of valid entries in
	 * the arrays below.
	 */
	int			n_events;
	/* Number of events added since the last hw_perf_disable(). */
	int			n_added;
	/* Events currently scheduled. */
	struct perf_event	*event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long		evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int			current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long		config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long		idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);



/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR.  This is the
	 * LSB of the counter.
	 */
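	/*
	 * Worked example: the EV67's counters are 20 bits wide (see
	 * pmc_max_period below), so given a raw PCTR value v the count
	 * held in PMC i is (v >> pmc_count_shift[i]) & pmc_count_mask[i],
	 * which is exactly what alpha_read_pmc() computes.
	 */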
	int pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;


#define HW_OP_UNSUPPORTED -1

/*
 * The hardware descriptions of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow.  Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order
 * as the enum ev67_pmc_event_type definition.
 */
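/*
 * For example, a lone EV67_CYCLES event is programmed with the
 * EV67_PCTR_INSTR_CYCLES config and read from PMC 1; whatever PMC 0
 * accumulates under that config is simply never read back.
 */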
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};


/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				  unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
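	/*
	 * For example, EV67_BCACHEMISS together with EV67_MBOXREPLAY:
	 * both events require PMC 1 and no single config counts both,
	 * so the group cannot be scheduled.
	 */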
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}


static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints
};



/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	long left = atomic64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
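	/*
	 * For example, with the EV67's pmc_left of {16, 4, 0}, a request
	 * to count just 10 more events on PMC 0 is rounded up to 16; the
	 * overshoot is later absorbed through period_left by
	 * alpha_perf_event_update().
	 */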
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

	atomic64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}


/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for a negative delta hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}


/*
 * Collect all HW events into the array event[].
 */
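/*
 * Returns the number of events gathered from the group leader and its
 * non-software siblings (events in the OFF state are skipped), or -1 if
 * they will not fit into max_count entries.
 */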
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	list_for_each_entry(pe, &group->sibling_list, group_entry) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}



/*
 * Check that a group of events can be simultaneously scheduled on to the
 * PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{
	/* Being called with no HW events is possible from
	 * hw_perf_group_sched_in().
	 */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}


/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update. */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
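	/* An event that was moved above gets a fresh sampling period on its
	 * new PMC here; its count so far was already banked by the call to
	 * alpha_perf_event_update() in the loop above.
	 */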
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
	cpuc->config = cpuc->event[0]->hw.config_base;
}



/* Schedule a perf HW event on to the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned. */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return. */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}



/* Remove a perf HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}


static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}


static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}


static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}


/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
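/*
 * The range check below relies on the processor type codes defined in
 * asm/hwrpb.h being contiguous from EV67_CPU up to EV69_CPU, spanning
 * the EV68 variants, the EV7 and the EV79.
 */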
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW. */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}



static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}



static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types; one of
	 * them (EV67_MBOXREPLAY) is programmable only via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		ev = attr->config & 0xff;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/* The EV67 does not support mode exclusion. */
	if (attr->exclude_kernel || attr->exclude_user
			|| attr->exclude_hv || attr->exclude_idle) {
		return -EPERM;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then that we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				alpha_pmu->num_pmcs - 1,
				evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we simply assume
	 * that we can use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
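/*
 * Note the wrperfmon() ordering below: the counting mode and the event
 * selection are programmed while the PMCs are still disabled, and only
 * then are the counters in idx_mask turned on.
 */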
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any newly scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}


/*
 * Main entry point - disable HW performance counters.
 */
static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_enable,
	.pmu_disable	= alpha_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.add		= alpha_pmu_add,
	.del		= alpha_pmu_del,
	.start		= alpha_pmu_start,
	.stop		= alpha_pmu_stop,
	.read		= alpha_pmu_read,
};


/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}


/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					 struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__get_cpu_var(irq_pmi_count)++;
	cpuc = &__get_cpu_var(cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous!  The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	perf_sample_data_init(&data, 0);
	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	data.period = event->hw.last_period;

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, 1, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			cpuc->idx_mask &= ~(1UL<<idx);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);

	return;
}



/*
 * Init call to initialise performance events at kernel startup.
 */
void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector. */
	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification. */
	alpha_pmu = &ev67_pmu;

	perf_pmu_register(&pmu);
}