// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>

#include <asm/sbi.h>

static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)	{\
	case __csr_num:				\
		__val = csr_read(__csr_num);	\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)	{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)	{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)	{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/*
 * Read the CSR of a corresponding counter.
 */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
	   (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
		pr_err("Invalid performance counter csr %lx\n", csr);
		return -EINVAL;
	}

	return csr_read_num(csr);
}

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!rvpmu->ctr_get_width)
	/*
	 * If the pmu driver doesn't support counter width, set it to the
	 * default maximum allowed by the specification.
	 */
		cwidth = 63;
	else {
		if (hwc->idx == -1)
			/* Handle init case where idx is not initialized yet */
			cwidth = rvpmu->ctr_get_width(0);
		else
			cwidth = rvpmu->ctr_get_width(hwc->idx);
	}

	return GENMASK_ULL(cwidth, 0);
}

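/*
 * Read the current raw counter value through the driver's ctr_read()
 * callback and fold the delta since the previous read into event->count.
 * The delta is masked to the counter width so a wrap of a narrow hardware
 * counter is accounted for correctly, and a cmpxchg loop is used so that a
 * concurrent update of prev_count (e.g. from interrupt context) is not lost.
 */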
u64 riscv_pmu_event_update(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	unsigned long cmask;
	u64 oldval, delta;

	if (!rvpmu->ctr_read)
		return 0;

	cmask = riscv_pmu_ctr_get_width_mask(event);

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = rvpmu->ctr_read(event);
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & cmask;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return delta;
}

void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	return overflow;
}

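/*
 * (Re)start the hardware counter for this event. The start value is the
 * two's complement of the remaining period (programmed into hwc->prev_count
 * by riscv_pmu_event_set_period()), truncated to the counter width, so the
 * counter overflows after roughly 'left' increments.
 */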
void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	u64 init_val;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;
	riscv_pmu_event_set_period(event);
	init_val = local64_read(&hwc->prev_count) & max_period;
	rvpmu->ctr_start(event, init_val);
	perf_event_update_userpage(event);
}

static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = rvpmu->ctr_get_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	cpuc->events[idx] = event;
	cpuc->n_events++;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		riscv_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
	riscv_pmu_event_update(event);
}

static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	hwc->flags = 0;
	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapped_event;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 * config will contain the information about the counter CSR;
	 * idx will contain the counter index.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period = cmask >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

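/*
 * Allocate and pre-initialise a struct riscv_pmu with the generic perf
 * callbacks above. The platform driver is expected to fill in the remaining
 * callbacks (ctr_get_idx, ctr_start, ctr_stop, ctr_read, event_map, ...)
 * before registering the pmu with perf_pmu_register().
 */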
struct riscv_pmu *riscv_pmu_alloc(void)
{
	struct riscv_pmu *pmu;
	int cpuid, i;
	struct cpu_hw_events *cpuc;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpuid) {
		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
		cpuc->n_events = 0;
		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
			cpuc->events[i] = NULL;
	}
	pmu->pmu = (struct pmu) {
		.event_init	= riscv_pmu_event_init,
		.add		= riscv_pmu_add,
		.del		= riscv_pmu_del,
		.start		= riscv_pmu_start,
		.stop		= riscv_pmu_stop,
		.read		= riscv_pmu_read,
	};

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}