1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * ARMv8 PMUv3 Performance Events handling code. 4 * 5 * Copyright (C) 2012 ARM Limited 6 * Author: Will Deacon <will.deacon@arm.com> 7 * 8 * This code is based heavily on the ARMv7 perf event code. 9 */ 10 11 #include <asm/irq_regs.h> 12 #include <asm/perf_event.h> 13 #include <asm/virt.h> 14 15 #include <clocksource/arm_arch_timer.h> 16 17 #include <linux/acpi.h> 18 #include <linux/clocksource.h> 19 #include <linux/of.h> 20 #include <linux/perf/arm_pmu.h> 21 #include <linux/perf/arm_pmuv3.h> 22 #include <linux/platform_device.h> 23 #include <linux/sched_clock.h> 24 #include <linux/smp.h> 25 26 #include <asm/arm_pmuv3.h> 27 28 /* ARMv8 Cortex-A53 specific event types. */ 29 #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 30 31 /* ARMv8 Cavium ThunderX specific event types. */ 32 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9 33 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA 34 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB 35 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC 36 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED 37 38 /* 39 * ARMv8 Architectural defined events, not all of these may 40 * be supported on any given implementation. Unsupported events will 41 * be disabled at run-time based on the PMCEID registers. 42 */ 43 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { 44 PERF_MAP_ALL_UNSUPPORTED, 45 [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES, 46 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED, 47 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, 48 [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, 49 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED, 50 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, 51 [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, 52 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND, 53 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND, 54 }; 55 56 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 57 [PERF_COUNT_HW_CACHE_OP_MAX] 58 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 59 PERF_CACHE_MAP_ALL_UNSUPPORTED, 60 61 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, 62 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, 63 64 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE, 65 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL, 66 67 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL, 68 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB, 69 70 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL, 71 [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB, 72 73 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD, 74 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD, 75 76 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED, 77 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, 78 }; 79 80 static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 81 [PERF_COUNT_HW_CACHE_OP_MAX] 82 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 83 PERF_CACHE_MAP_ALL_UNSUPPORTED, 84 85 [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL, 86 87 [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD, 88 [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR, 89 }; 90 91 static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 92 [PERF_COUNT_HW_CACHE_OP_MAX] 93 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 94 PERF_CACHE_MAP_ALL_UNSUPPORTED, 95 96 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD, 97 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD, 98 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR, 99 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR, 100 101 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD, 102 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR, 103 104 [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD, 105 [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR, 106 }; 107 108 static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 109 [PERF_COUNT_HW_CACHE_OP_MAX] 110 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 111 PERF_CACHE_MAP_ALL_UNSUPPORTED, 112 113 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD, 114 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR, 115 }; 116 117 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 118 [PERF_COUNT_HW_CACHE_OP_MAX] 119 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 120 PERF_CACHE_MAP_ALL_UNSUPPORTED, 121 122 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD, 123 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD, 124 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR, 125 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST, 126 [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS, 127 [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS, 128 129 [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS, 130 [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS, 131 132 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD, 133 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD, 134 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR, 135 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR, 136 }; 137 138 static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 139 [PERF_COUNT_HW_CACHE_OP_MAX] 140 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 141 PERF_CACHE_MAP_ALL_UNSUPPORTED, 142 143 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD, 144 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD, 145 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR, 146 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR, 147 148 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD, 149 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR, 150 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD, 151 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR, 152 153 [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD, 154 [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR, 155 }; 156 157 static ssize_t 158 armv8pmu_events_sysfs_show(struct device *dev, 159 struct device_attribute *attr, char *page) 160 { 161 struct perf_pmu_events_attr *pmu_attr; 162 163 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); 164 165 return sprintf(page, "event=0x%04llx\n", pmu_attr->id); 166 } 167 168 #define ARMV8_EVENT_ATTR(name, config) \ 169 PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config) 170 171 static struct attribute *armv8_pmuv3_event_attrs[] = { 172 ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR), 173 ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL), 174 ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL), 175 ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL), 176 ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE), 177 ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL), 178 ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED), 179 ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED), 180 ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED), 181 ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN), 182 ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN), 183 ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED), 184 ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED), 185 ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED), 186 ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED), 187 ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED), 188 ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED), 189 ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES), 190 ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED), 191 ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS), 192 ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE), 193 ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB), 194 ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE), 195 ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL), 196 ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB), 197 ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS), 198 ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR), 199 ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC), 200 ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED), 201 ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES), 202 /* Don't expose the chain event in /sys, since it's useless in isolation */ 203 ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE), 204 ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE), 205 ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED), 206 ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED), 207 ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND), 208 ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND), 209 ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB), 210 ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB), 211 ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE), 212 ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL), 213 ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE), 214 ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL), 215 ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE), 216 ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB), 217 ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL), 218 ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL), 219 ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB), 220 ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB), 221 ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS), 222 ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE), 223 ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS), 224 ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK), 225 ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK), 226 ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD), 227 ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD), 228 ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD), 229 ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD), 230 ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED), 231 ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC), 232 ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL), 233 ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND), 234 ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND), 235 ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT), 236 ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP), 237 ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED), 238 ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE), 239 ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION), 240 ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES), 241 ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM), 242 ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS), 243 ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD), 244 ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS), 245 ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD), 246 ARMV8_EVENT_ATTR(trb_wrap, ARMV8_PMUV3_PERFCTR_TRB_WRAP), 247 ARMV8_EVENT_ATTR(trb_trig, ARMV8_PMUV3_PERFCTR_TRB_TRIG), 248 ARMV8_EVENT_ATTR(trcextout0, ARMV8_PMUV3_PERFCTR_TRCEXTOUT0), 249 ARMV8_EVENT_ATTR(trcextout1, ARMV8_PMUV3_PERFCTR_TRCEXTOUT1), 250 ARMV8_EVENT_ATTR(trcextout2, ARMV8_PMUV3_PERFCTR_TRCEXTOUT2), 251 ARMV8_EVENT_ATTR(trcextout3, ARMV8_PMUV3_PERFCTR_TRCEXTOUT3), 252 ARMV8_EVENT_ATTR(cti_trigout4, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4), 253 ARMV8_EVENT_ATTR(cti_trigout5, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5), 254 ARMV8_EVENT_ATTR(cti_trigout6, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6), 255 ARMV8_EVENT_ATTR(cti_trigout7, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7), 256 ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT), 257 ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT), 258 ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT), 259 ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED), 260 ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD), 261 ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR), 262 NULL, 263 }; 264 265 static umode_t 266 armv8pmu_event_attr_is_visible(struct kobject *kobj, 267 struct attribute *attr, int unused) 268 { 269 struct device *dev = kobj_to_dev(kobj); 270 struct pmu *pmu = dev_get_drvdata(dev); 271 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); 272 struct perf_pmu_events_attr *pmu_attr; 273 274 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); 275 276 if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && 277 test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) 278 return attr->mode; 279 280 if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) { 281 u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; 282 283 if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS && 284 test_bit(id, cpu_pmu->pmceid_ext_bitmap)) 285 return attr->mode; 286 } 287 288 return 0; 289 } 290 291 static const struct attribute_group armv8_pmuv3_events_attr_group = { 292 .name = "events", 293 .attrs = armv8_pmuv3_event_attrs, 294 .is_visible = armv8pmu_event_attr_is_visible, 295 }; 296 297 PMU_FORMAT_ATTR(event, "config:0-15"); 298 PMU_FORMAT_ATTR(long, "config1:0"); 299 PMU_FORMAT_ATTR(rdpmc, "config1:1"); 300 301 static int sysctl_perf_user_access __read_mostly; 302 303 static inline bool armv8pmu_event_is_64bit(struct perf_event *event) 304 { 305 return event->attr.config1 & 0x1; 306 } 307 308 static inline bool armv8pmu_event_want_user_access(struct perf_event *event) 309 { 310 return event->attr.config1 & 0x2; 311 } 312 313 static struct attribute *armv8_pmuv3_format_attrs[] = { 314 &format_attr_event.attr, 315 &format_attr_long.attr, 316 &format_attr_rdpmc.attr, 317 NULL, 318 }; 319 320 static const struct attribute_group armv8_pmuv3_format_attr_group = { 321 .name = "format", 322 .attrs = armv8_pmuv3_format_attrs, 323 }; 324 325 static ssize_t slots_show(struct device *dev, struct device_attribute *attr, 326 char *page) 327 { 328 struct pmu *pmu = dev_get_drvdata(dev); 329 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); 330 u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK; 331 332 return sysfs_emit(page, "0x%08x\n", slots); 333 } 334 335 static DEVICE_ATTR_RO(slots); 336 337 static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr, 338 char *page) 339 { 340 struct pmu *pmu = dev_get_drvdata(dev); 341 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); 342 u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT) 343 & ARMV8_PMU_BUS_SLOTS_MASK; 344 345 return sysfs_emit(page, "0x%08x\n", bus_slots); 346 } 347 348 static DEVICE_ATTR_RO(bus_slots); 349 350 static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, 351 char *page) 352 { 353 struct pmu *pmu = dev_get_drvdata(dev); 354 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); 355 u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT) 356 & ARMV8_PMU_BUS_WIDTH_MASK; 357 u32 val = 0; 358 359 /* Encoded as Log2(number of bytes), plus one */ 360 if (bus_width > 2 && bus_width < 13) 361 val = 1 << (bus_width - 1); 362 363 return sysfs_emit(page, "0x%08x\n", val); 364 } 365 366 static DEVICE_ATTR_RO(bus_width); 367 368 static struct attribute *armv8_pmuv3_caps_attrs[] = { 369 &dev_attr_slots.attr, 370 &dev_attr_bus_slots.attr, 371 &dev_attr_bus_width.attr, 372 NULL, 373 }; 374 375 static const struct attribute_group armv8_pmuv3_caps_attr_group = { 376 .name = "caps", 377 .attrs = armv8_pmuv3_caps_attrs, 378 }; 379 380 /* 381 * Perf Events' indices 382 */ 383 #define ARMV8_IDX_CYCLE_COUNTER 0 384 #define ARMV8_IDX_COUNTER0 1 385 #define ARMV8_IDX_CYCLE_COUNTER_USER 32 386 387 /* 388 * We unconditionally enable ARMv8.5-PMU long event counter support 389 * (64-bit events) where supported. Indicate if this arm_pmu has long 390 * event counter support. 391 */ 392 static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) 393 { 394 return (is_pmuv3p5(cpu_pmu->pmuver)); 395 } 396 397 static inline bool armv8pmu_event_has_user_read(struct perf_event *event) 398 { 399 return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT; 400 } 401 402 /* 403 * We must chain two programmable counters for 64 bit events, 404 * except when we have allocated the 64bit cycle counter (for CPU 405 * cycles event) or when user space counter access is enabled. 406 */ 407 static inline bool armv8pmu_event_is_chained(struct perf_event *event) 408 { 409 int idx = event->hw.idx; 410 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); 411 412 return !armv8pmu_event_has_user_read(event) && 413 armv8pmu_event_is_64bit(event) && 414 !armv8pmu_has_long_event(cpu_pmu) && 415 (idx != ARMV8_IDX_CYCLE_COUNTER); 416 } 417 418 /* 419 * ARMv8 low level PMU access 420 */ 421 422 /* 423 * Perf Event to low level counters mapping 424 */ 425 #define ARMV8_IDX_TO_COUNTER(x) \ 426 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) 427 428 static inline u32 armv8pmu_pmcr_read(void) 429 { 430 return read_pmcr(); 431 } 432 433 static inline void armv8pmu_pmcr_write(u32 val) 434 { 435 val &= ARMV8_PMU_PMCR_MASK; 436 isb(); 437 write_pmcr(val); 438 } 439 440 static inline int armv8pmu_has_overflowed(u32 pmovsr) 441 { 442 return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; 443 } 444 445 static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) 446 { 447 return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); 448 } 449 450 static inline u64 armv8pmu_read_evcntr(int idx) 451 { 452 u32 counter = ARMV8_IDX_TO_COUNTER(idx); 453 454 return read_pmevcntrn(counter); 455 } 456 457 static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) 458 { 459 int idx = event->hw.idx; 460 u64 val = armv8pmu_read_evcntr(idx); 461 462 if (armv8pmu_event_is_chained(event)) 463 val = (val << 32) | armv8pmu_read_evcntr(idx - 1); 464 return val; 465 } 466 467 /* 468 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP 469 * is set the event counters also become 64-bit counters. Unless the 470 * user has requested a long counter (attr.config1) then we want to 471 * interrupt upon 32-bit overflow - we achieve this by applying a bias. 472 */ 473 static bool armv8pmu_event_needs_bias(struct perf_event *event) 474 { 475 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); 476 struct hw_perf_event *hwc = &event->hw; 477 int idx = hwc->idx; 478 479 if (armv8pmu_event_is_64bit(event)) 480 return false; 481 482 if (armv8pmu_has_long_event(cpu_pmu) || 483 idx == ARMV8_IDX_CYCLE_COUNTER) 484 return true; 485 486 return false; 487 } 488 489 static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value) 490 { 491 if (armv8pmu_event_needs_bias(event)) 492 value |= GENMASK(63, 32); 493 494 return value; 495 } 496 497 static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value) 498 { 499 if (armv8pmu_event_needs_bias(event)) 500 value &= ~GENMASK(63, 32); 501 502 return value; 503 } 504 505 static u64 armv8pmu_read_counter(struct perf_event *event) 506 { 507 struct hw_perf_event *hwc = &event->hw; 508 int idx = hwc->idx; 509 u64 value; 510 511 if (idx == ARMV8_IDX_CYCLE_COUNTER) 512 value = read_pmccntr(); 513 else 514 value = armv8pmu_read_hw_counter(event); 515 516 return armv8pmu_unbias_long_counter(event, value); 517 } 518 519 static inline void armv8pmu_write_evcntr(int idx, u64 value) 520 { 521 u32 counter = ARMV8_IDX_TO_COUNTER(idx); 522 523 write_pmevcntrn(counter, value); 524 } 525 526 static inline void armv8pmu_write_hw_counter(struct perf_event *event, 527 u64 value) 528 { 529 int idx = event->hw.idx; 530 531 if (armv8pmu_event_is_chained(event)) { 532 armv8pmu_write_evcntr(idx, upper_32_bits(value)); 533 armv8pmu_write_evcntr(idx - 1, lower_32_bits(value)); 534 } else { 535 armv8pmu_write_evcntr(idx, value); 536 } 537 } 538 539 static void armv8pmu_write_counter(struct perf_event *event, u64 value) 540 { 541 struct hw_perf_event *hwc = &event->hw; 542 int idx = hwc->idx; 543 544 value = armv8pmu_bias_long_counter(event, value); 545 546 if (idx == ARMV8_IDX_CYCLE_COUNTER) 547 write_pmccntr(value); 548 else 549 armv8pmu_write_hw_counter(event, value); 550 } 551 552 static inline void armv8pmu_write_evtype(int idx, u32 val) 553 { 554 u32 counter = ARMV8_IDX_TO_COUNTER(idx); 555 556 val &= ARMV8_PMU_EVTYPE_MASK; 557 write_pmevtypern(counter, val); 558 } 559 560 static inline void armv8pmu_write_event_type(struct perf_event *event) 561 { 562 struct hw_perf_event *hwc = &event->hw; 563 int idx = hwc->idx; 564 565 /* 566 * For chained events, the low counter is programmed to count 567 * the event of interest and the high counter is programmed 568 * with CHAIN event code with filters set to count at all ELs. 569 */ 570 if (armv8pmu_event_is_chained(event)) { 571 u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN | 572 ARMV8_PMU_INCLUDE_EL2; 573 574 armv8pmu_write_evtype(idx - 1, hwc->config_base); 575 armv8pmu_write_evtype(idx, chain_evt); 576 } else { 577 if (idx == ARMV8_IDX_CYCLE_COUNTER) 578 write_pmccfiltr(hwc->config_base); 579 else 580 armv8pmu_write_evtype(idx, hwc->config_base); 581 } 582 } 583 584 static u32 armv8pmu_event_cnten_mask(struct perf_event *event) 585 { 586 int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); 587 u32 mask = BIT(counter); 588 589 if (armv8pmu_event_is_chained(event)) 590 mask |= BIT(counter - 1); 591 return mask; 592 } 593 594 static inline void armv8pmu_enable_counter(u32 mask) 595 { 596 /* 597 * Make sure event configuration register writes are visible before we 598 * enable the counter. 599 * */ 600 isb(); 601 write_pmcntenset(mask); 602 } 603 604 static inline void armv8pmu_enable_event_counter(struct perf_event *event) 605 { 606 struct perf_event_attr *attr = &event->attr; 607 u32 mask = armv8pmu_event_cnten_mask(event); 608 609 kvm_set_pmu_events(mask, attr); 610 611 /* We rely on the hypervisor switch code to enable guest counters */ 612 if (!kvm_pmu_counter_deferred(attr)) 613 armv8pmu_enable_counter(mask); 614 } 615 616 static inline void armv8pmu_disable_counter(u32 mask) 617 { 618 write_pmcntenclr(mask); 619 /* 620 * Make sure the effects of disabling the counter are visible before we 621 * start configuring the event. 622 */ 623 isb(); 624 } 625 626 static inline void armv8pmu_disable_event_counter(struct perf_event *event) 627 { 628 struct perf_event_attr *attr = &event->attr; 629 u32 mask = armv8pmu_event_cnten_mask(event); 630 631 kvm_clr_pmu_events(mask); 632 633 /* We rely on the hypervisor switch code to disable guest counters */ 634 if (!kvm_pmu_counter_deferred(attr)) 635 armv8pmu_disable_counter(mask); 636 } 637 638 static inline void armv8pmu_enable_intens(u32 mask) 639 { 640 write_pmintenset(mask); 641 } 642 643 static inline void armv8pmu_enable_event_irq(struct perf_event *event) 644 { 645 u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); 646 armv8pmu_enable_intens(BIT(counter)); 647 } 648 649 static inline void armv8pmu_disable_intens(u32 mask) 650 { 651 write_pmintenclr(mask); 652 isb(); 653 /* Clear the overflow flag in case an interrupt is pending. */ 654 write_pmovsclr(mask); 655 isb(); 656 } 657 658 static inline void armv8pmu_disable_event_irq(struct perf_event *event) 659 { 660 u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); 661 armv8pmu_disable_intens(BIT(counter)); 662 } 663 664 static inline u32 armv8pmu_getreset_flags(void) 665 { 666 u32 value; 667 668 /* Read */ 669 value = read_pmovsclr(); 670 671 /* Write to clear flags */ 672 value &= ARMV8_PMU_OVSR_MASK; 673 write_pmovsclr(value); 674 675 return value; 676 } 677 678 static void armv8pmu_disable_user_access(void) 679 { 680 write_pmuserenr(0); 681 } 682 683 static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) 684 { 685 int i; 686 struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); 687 688 /* Clear any unused counters to avoid leaking their contents */ 689 for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) { 690 if (i == ARMV8_IDX_CYCLE_COUNTER) 691 write_pmccntr(0); 692 else 693 armv8pmu_write_evcntr(i, 0); 694 } 695 696 write_pmuserenr(0); 697 write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); 698 } 699 700 static void armv8pmu_enable_event(struct perf_event *event) 701 { 702 /* 703 * Enable counter and interrupt, and set the counter to count 704 * the event that we're interested in. 705 */ 706 707 /* 708 * Disable counter 709 */ 710 armv8pmu_disable_event_counter(event); 711 712 /* 713 * Set event. 714 */ 715 armv8pmu_write_event_type(event); 716 717 /* 718 * Enable interrupt for this counter 719 */ 720 armv8pmu_enable_event_irq(event); 721 722 /* 723 * Enable counter 724 */ 725 armv8pmu_enable_event_counter(event); 726 } 727 728 static void armv8pmu_disable_event(struct perf_event *event) 729 { 730 /* 731 * Disable counter 732 */ 733 armv8pmu_disable_event_counter(event); 734 735 /* 736 * Disable interrupt for this counter 737 */ 738 armv8pmu_disable_event_irq(event); 739 } 740 741 static void armv8pmu_start(struct arm_pmu *cpu_pmu) 742 { 743 struct perf_event_context *ctx; 744 int nr_user = 0; 745 746 ctx = perf_cpu_task_ctx(); 747 if (ctx) 748 nr_user = ctx->nr_user; 749 750 if (sysctl_perf_user_access && nr_user) 751 armv8pmu_enable_user_access(cpu_pmu); 752 else 753 armv8pmu_disable_user_access(); 754 755 /* Enable all counters */ 756 armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); 757 } 758 759 static void armv8pmu_stop(struct arm_pmu *cpu_pmu) 760 { 761 /* Disable all counters */ 762 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); 763 } 764 765 static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) 766 { 767 u32 pmovsr; 768 struct perf_sample_data data; 769 struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); 770 struct pt_regs *regs; 771 int idx; 772 773 /* 774 * Get and reset the IRQ flags 775 */ 776 pmovsr = armv8pmu_getreset_flags(); 777 778 /* 779 * Did an overflow occur? 780 */ 781 if (!armv8pmu_has_overflowed(pmovsr)) 782 return IRQ_NONE; 783 784 /* 785 * Handle the counter(s) overflow(s) 786 */ 787 regs = get_irq_regs(); 788 789 /* 790 * Stop the PMU while processing the counter overflows 791 * to prevent skews in group events. 792 */ 793 armv8pmu_stop(cpu_pmu); 794 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 795 struct perf_event *event = cpuc->events[idx]; 796 struct hw_perf_event *hwc; 797 798 /* Ignore if we don't have an event. */ 799 if (!event) 800 continue; 801 802 /* 803 * We have a single interrupt for all counters. Check that 804 * each counter has overflowed before we process it. 805 */ 806 if (!armv8pmu_counter_has_overflowed(pmovsr, idx)) 807 continue; 808 809 hwc = &event->hw; 810 armpmu_event_update(event); 811 perf_sample_data_init(&data, 0, hwc->last_period); 812 if (!armpmu_event_set_period(event)) 813 continue; 814 815 /* 816 * Perf event overflow will queue the processing of the event as 817 * an irq_work which will be taken care of in the handling of 818 * IPI_IRQ_WORK. 819 */ 820 if (perf_event_overflow(event, &data, regs)) 821 cpu_pmu->disable(event); 822 } 823 armv8pmu_start(cpu_pmu); 824 825 return IRQ_HANDLED; 826 } 827 828 static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc, 829 struct arm_pmu *cpu_pmu) 830 { 831 int idx; 832 833 for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) { 834 if (!test_and_set_bit(idx, cpuc->used_mask)) 835 return idx; 836 } 837 return -EAGAIN; 838 } 839 840 static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc, 841 struct arm_pmu *cpu_pmu) 842 { 843 int idx; 844 845 /* 846 * Chaining requires two consecutive event counters, where 847 * the lower idx must be even. 848 */ 849 for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) { 850 if (!test_and_set_bit(idx, cpuc->used_mask)) { 851 /* Check if the preceding even counter is available */ 852 if (!test_and_set_bit(idx - 1, cpuc->used_mask)) 853 return idx; 854 /* Release the Odd counter */ 855 clear_bit(idx, cpuc->used_mask); 856 } 857 } 858 return -EAGAIN; 859 } 860 861 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, 862 struct perf_event *event) 863 { 864 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); 865 struct hw_perf_event *hwc = &event->hw; 866 unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; 867 868 /* Always prefer to place a cycle counter into the cycle counter. */ 869 if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { 870 if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) 871 return ARMV8_IDX_CYCLE_COUNTER; 872 else if (armv8pmu_event_is_64bit(event) && 873 armv8pmu_event_want_user_access(event) && 874 !armv8pmu_has_long_event(cpu_pmu)) 875 return -EAGAIN; 876 } 877 878 /* 879 * Otherwise use events counters 880 */ 881 if (armv8pmu_event_is_chained(event)) 882 return armv8pmu_get_chain_idx(cpuc, cpu_pmu); 883 else 884 return armv8pmu_get_single_idx(cpuc, cpu_pmu); 885 } 886 887 static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, 888 struct perf_event *event) 889 { 890 int idx = event->hw.idx; 891 892 clear_bit(idx, cpuc->used_mask); 893 if (armv8pmu_event_is_chained(event)) 894 clear_bit(idx - 1, cpuc->used_mask); 895 } 896 897 static int armv8pmu_user_event_idx(struct perf_event *event) 898 { 899 if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event)) 900 return 0; 901 902 /* 903 * We remap the cycle counter index to 32 to 904 * match the offset applied to the rest of 905 * the counter indices. 906 */ 907 if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER) 908 return ARMV8_IDX_CYCLE_COUNTER_USER; 909 910 return event->hw.idx; 911 } 912 913 /* 914 * Add an event filter to a given event. 915 */ 916 static int armv8pmu_set_event_filter(struct hw_perf_event *event, 917 struct perf_event_attr *attr) 918 { 919 unsigned long config_base = 0; 920 921 if (attr->exclude_idle) 922 return -EPERM; 923 924 /* 925 * If we're running in hyp mode, then we *are* the hypervisor. 926 * Therefore we ignore exclude_hv in this configuration, since 927 * there's no hypervisor to sample anyway. This is consistent 928 * with other architectures (x86 and Power). 929 */ 930 if (is_kernel_in_hyp_mode()) { 931 if (!attr->exclude_kernel && !attr->exclude_host) 932 config_base |= ARMV8_PMU_INCLUDE_EL2; 933 if (attr->exclude_guest) 934 config_base |= ARMV8_PMU_EXCLUDE_EL1; 935 if (attr->exclude_host) 936 config_base |= ARMV8_PMU_EXCLUDE_EL0; 937 } else { 938 if (!attr->exclude_hv && !attr->exclude_host) 939 config_base |= ARMV8_PMU_INCLUDE_EL2; 940 } 941 942 /* 943 * Filter out !VHE kernels and guest kernels 944 */ 945 if (attr->exclude_kernel) 946 config_base |= ARMV8_PMU_EXCLUDE_EL1; 947 948 if (attr->exclude_user) 949 config_base |= ARMV8_PMU_EXCLUDE_EL0; 950 951 /* 952 * Install the filter into config_base as this is used to 953 * construct the event type. 954 */ 955 event->config_base = config_base; 956 957 return 0; 958 } 959 960 static void armv8pmu_reset(void *info) 961 { 962 struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; 963 u32 pmcr; 964 965 /* The counter and interrupt enable registers are unknown at reset. */ 966 armv8pmu_disable_counter(U32_MAX); 967 armv8pmu_disable_intens(U32_MAX); 968 969 /* Clear the counters we flip at guest entry/exit */ 970 kvm_clr_pmu_events(U32_MAX); 971 972 /* 973 * Initialize & Reset PMNC. Request overflow interrupt for 974 * 64 bit cycle counter but cheat in armv8pmu_write_counter(). 975 */ 976 pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC; 977 978 /* Enable long event counter support where available */ 979 if (armv8pmu_has_long_event(cpu_pmu)) 980 pmcr |= ARMV8_PMU_PMCR_LP; 981 982 armv8pmu_pmcr_write(pmcr); 983 } 984 985 static int __armv8_pmuv3_map_event(struct perf_event *event, 986 const unsigned (*extra_event_map) 987 [PERF_COUNT_HW_MAX], 988 const unsigned (*extra_cache_map) 989 [PERF_COUNT_HW_CACHE_MAX] 990 [PERF_COUNT_HW_CACHE_OP_MAX] 991 [PERF_COUNT_HW_CACHE_RESULT_MAX]) 992 { 993 int hw_event_id; 994 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 995 996 hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map, 997 &armv8_pmuv3_perf_cache_map, 998 ARMV8_PMU_EVTYPE_EVENT); 999 1000 /* 1001 * CHAIN events only work when paired with an adjacent counter, and it 1002 * never makes sense for a user to open one in isolation, as they'll be 1003 * rotated arbitrarily. 1004 */ 1005 if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN) 1006 return -EINVAL; 1007 1008 if (armv8pmu_event_is_64bit(event)) 1009 event->hw.flags |= ARMPMU_EVT_64BIT; 1010 1011 /* 1012 * User events must be allocated into a single counter, and so 1013 * must not be chained. 1014 * 1015 * Most 64-bit events require long counter support, but 64-bit 1016 * CPU_CYCLES events can be placed into the dedicated cycle 1017 * counter when this is free. 1018 */ 1019 if (armv8pmu_event_want_user_access(event)) { 1020 if (!(event->attach_state & PERF_ATTACH_TASK)) 1021 return -EINVAL; 1022 if (armv8pmu_event_is_64bit(event) && 1023 (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && 1024 !armv8pmu_has_long_event(armpmu)) 1025 return -EOPNOTSUPP; 1026 1027 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; 1028 } 1029 1030 /* Only expose micro/arch events supported by this PMU */ 1031 if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) 1032 && test_bit(hw_event_id, armpmu->pmceid_bitmap)) { 1033 return hw_event_id; 1034 } 1035 1036 return armpmu_map_event(event, extra_event_map, extra_cache_map, 1037 ARMV8_PMU_EVTYPE_EVENT); 1038 } 1039 1040 static int armv8_pmuv3_map_event(struct perf_event *event) 1041 { 1042 return __armv8_pmuv3_map_event(event, NULL, NULL); 1043 } 1044 1045 static int armv8_a53_map_event(struct perf_event *event) 1046 { 1047 return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map); 1048 } 1049 1050 static int armv8_a57_map_event(struct perf_event *event) 1051 { 1052 return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map); 1053 } 1054 1055 static int armv8_a73_map_event(struct perf_event *event) 1056 { 1057 return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map); 1058 } 1059 1060 static int armv8_thunder_map_event(struct perf_event *event) 1061 { 1062 return __armv8_pmuv3_map_event(event, NULL, 1063 &armv8_thunder_perf_cache_map); 1064 } 1065 1066 static int armv8_vulcan_map_event(struct perf_event *event) 1067 { 1068 return __armv8_pmuv3_map_event(event, NULL, 1069 &armv8_vulcan_perf_cache_map); 1070 } 1071 1072 struct armv8pmu_probe_info { 1073 struct arm_pmu *pmu; 1074 bool present; 1075 }; 1076 1077 static void __armv8pmu_probe_pmu(void *info) 1078 { 1079 struct armv8pmu_probe_info *probe = info; 1080 struct arm_pmu *cpu_pmu = probe->pmu; 1081 u64 pmceid_raw[2]; 1082 u32 pmceid[2]; 1083 int pmuver; 1084 1085 pmuver = read_pmuver(); 1086 if (!pmuv3_implemented(pmuver)) 1087 return; 1088 1089 cpu_pmu->pmuver = pmuver; 1090 probe->present = true; 1091 1092 /* Read the nb of CNTx counters supported from PMNC */ 1093 cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) 1094 & ARMV8_PMU_PMCR_N_MASK; 1095 1096 /* Add the CPU cycles counter */ 1097 cpu_pmu->num_events += 1; 1098 1099 pmceid[0] = pmceid_raw[0] = read_pmceid0(); 1100 pmceid[1] = pmceid_raw[1] = read_pmceid1(); 1101 1102 bitmap_from_arr32(cpu_pmu->pmceid_bitmap, 1103 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); 1104 1105 pmceid[0] = pmceid_raw[0] >> 32; 1106 pmceid[1] = pmceid_raw[1] >> 32; 1107 1108 bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, 1109 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); 1110 1111 /* store PMMIR register for sysfs */ 1112 if (is_pmuv3p4(pmuver) && (pmceid_raw[1] & BIT(31))) 1113 cpu_pmu->reg_pmmir = read_pmmir(); 1114 else 1115 cpu_pmu->reg_pmmir = 0; 1116 } 1117 1118 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) 1119 { 1120 struct armv8pmu_probe_info probe = { 1121 .pmu = cpu_pmu, 1122 .present = false, 1123 }; 1124 int ret; 1125 1126 ret = smp_call_function_any(&cpu_pmu->supported_cpus, 1127 __armv8pmu_probe_pmu, 1128 &probe, 1); 1129 if (ret) 1130 return ret; 1131 1132 return probe.present ? 0 : -ENODEV; 1133 } 1134 1135 static void armv8pmu_disable_user_access_ipi(void *unused) 1136 { 1137 armv8pmu_disable_user_access(); 1138 } 1139 1140 static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write, 1141 void *buffer, size_t *lenp, loff_t *ppos) 1142 { 1143 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 1144 if (ret || !write || sysctl_perf_user_access) 1145 return ret; 1146 1147 on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1); 1148 return 0; 1149 } 1150 1151 static struct ctl_table armv8_pmu_sysctl_table[] = { 1152 { 1153 .procname = "perf_user_access", 1154 .data = &sysctl_perf_user_access, 1155 .maxlen = sizeof(unsigned int), 1156 .mode = 0644, 1157 .proc_handler = armv8pmu_proc_user_access_handler, 1158 .extra1 = SYSCTL_ZERO, 1159 .extra2 = SYSCTL_ONE, 1160 }, 1161 { } 1162 }; 1163 1164 static void armv8_pmu_register_sysctl_table(void) 1165 { 1166 static u32 tbl_registered = 0; 1167 1168 if (!cmpxchg_relaxed(&tbl_registered, 0, 1)) 1169 register_sysctl("kernel", armv8_pmu_sysctl_table); 1170 } 1171 1172 static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, 1173 int (*map_event)(struct perf_event *event), 1174 const struct attribute_group *events, 1175 const struct attribute_group *format, 1176 const struct attribute_group *caps) 1177 { 1178 int ret = armv8pmu_probe_pmu(cpu_pmu); 1179 if (ret) 1180 return ret; 1181 1182 cpu_pmu->handle_irq = armv8pmu_handle_irq; 1183 cpu_pmu->enable = armv8pmu_enable_event; 1184 cpu_pmu->disable = armv8pmu_disable_event; 1185 cpu_pmu->read_counter = armv8pmu_read_counter; 1186 cpu_pmu->write_counter = armv8pmu_write_counter; 1187 cpu_pmu->get_event_idx = armv8pmu_get_event_idx; 1188 cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx; 1189 cpu_pmu->start = armv8pmu_start; 1190 cpu_pmu->stop = armv8pmu_stop; 1191 cpu_pmu->reset = armv8pmu_reset; 1192 cpu_pmu->set_event_filter = armv8pmu_set_event_filter; 1193 1194 cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx; 1195 1196 cpu_pmu->name = name; 1197 cpu_pmu->map_event = map_event; 1198 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ? 1199 events : &armv8_pmuv3_events_attr_group; 1200 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ? 1201 format : &armv8_pmuv3_format_attr_group; 1202 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ? 1203 caps : &armv8_pmuv3_caps_attr_group; 1204 1205 armv8_pmu_register_sysctl_table(); 1206 return 0; 1207 } 1208 1209 static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name, 1210 int (*map_event)(struct perf_event *event)) 1211 { 1212 return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL); 1213 } 1214 1215 #define PMUV3_INIT_SIMPLE(name) \ 1216 static int name##_pmu_init(struct arm_pmu *cpu_pmu) \ 1217 { \ 1218 return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\ 1219 } 1220 1221 PMUV3_INIT_SIMPLE(armv8_pmuv3) 1222 1223 PMUV3_INIT_SIMPLE(armv8_cortex_a34) 1224 PMUV3_INIT_SIMPLE(armv8_cortex_a55) 1225 PMUV3_INIT_SIMPLE(armv8_cortex_a65) 1226 PMUV3_INIT_SIMPLE(armv8_cortex_a75) 1227 PMUV3_INIT_SIMPLE(armv8_cortex_a76) 1228 PMUV3_INIT_SIMPLE(armv8_cortex_a77) 1229 PMUV3_INIT_SIMPLE(armv8_cortex_a78) 1230 PMUV3_INIT_SIMPLE(armv9_cortex_a510) 1231 PMUV3_INIT_SIMPLE(armv9_cortex_a710) 1232 PMUV3_INIT_SIMPLE(armv8_cortex_x1) 1233 PMUV3_INIT_SIMPLE(armv9_cortex_x2) 1234 PMUV3_INIT_SIMPLE(armv8_neoverse_e1) 1235 PMUV3_INIT_SIMPLE(armv8_neoverse_n1) 1236 PMUV3_INIT_SIMPLE(armv9_neoverse_n2) 1237 PMUV3_INIT_SIMPLE(armv8_neoverse_v1) 1238 1239 PMUV3_INIT_SIMPLE(armv8_nvidia_carmel) 1240 PMUV3_INIT_SIMPLE(armv8_nvidia_denver) 1241 1242 static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) 1243 { 1244 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35", 1245 armv8_a53_map_event); 1246 } 1247 1248 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) 1249 { 1250 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53", 1251 armv8_a53_map_event); 1252 } 1253 1254 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) 1255 { 1256 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57", 1257 armv8_a57_map_event); 1258 } 1259 1260 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) 1261 { 1262 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72", 1263 armv8_a57_map_event); 1264 } 1265 1266 static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) 1267 { 1268 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73", 1269 armv8_a73_map_event); 1270 } 1271 1272 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) 1273 { 1274 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder", 1275 armv8_thunder_map_event); 1276 } 1277 1278 static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) 1279 { 1280 return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan", 1281 armv8_vulcan_map_event); 1282 } 1283 1284 static const struct of_device_id armv8_pmu_of_device_ids[] = { 1285 {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init}, 1286 {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init}, 1287 {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, 1288 {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, 1289 {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init}, 1290 {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, 1291 {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init}, 1292 {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, 1293 {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, 1294 {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init}, 1295 {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init}, 1296 {.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init}, 1297 {.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init}, 1298 {.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init}, 1299 {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init}, 1300 {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init}, 1301 {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init}, 1302 {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init}, 1303 {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init}, 1304 {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init}, 1305 {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init}, 1306 {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, 1307 {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, 1308 {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init}, 1309 {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init}, 1310 {}, 1311 }; 1312 1313 static int armv8_pmu_device_probe(struct platform_device *pdev) 1314 { 1315 return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL); 1316 } 1317 1318 static struct platform_driver armv8_pmu_driver = { 1319 .driver = { 1320 .name = ARMV8_PMU_PDEV_NAME, 1321 .of_match_table = armv8_pmu_of_device_ids, 1322 .suppress_bind_attrs = true, 1323 }, 1324 .probe = armv8_pmu_device_probe, 1325 }; 1326 1327 static int __init armv8_pmu_driver_init(void) 1328 { 1329 if (acpi_disabled) 1330 return platform_driver_register(&armv8_pmu_driver); 1331 else 1332 return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init); 1333 } 1334 device_initcall(armv8_pmu_driver_init) 1335 1336 void arch_perf_update_userpage(struct perf_event *event, 1337 struct perf_event_mmap_page *userpg, u64 now) 1338 { 1339 struct clock_read_data *rd; 1340 unsigned int seq; 1341 u64 ns; 1342 1343 userpg->cap_user_time = 0; 1344 userpg->cap_user_time_zero = 0; 1345 userpg->cap_user_time_short = 0; 1346 userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event); 1347 1348 if (userpg->cap_user_rdpmc) { 1349 if (event->hw.flags & ARMPMU_EVT_64BIT) 1350 userpg->pmc_width = 64; 1351 else 1352 userpg->pmc_width = 32; 1353 } 1354 1355 do { 1356 rd = sched_clock_read_begin(&seq); 1357 1358 if (rd->read_sched_clock != arch_timer_read_counter) 1359 return; 1360 1361 userpg->time_mult = rd->mult; 1362 userpg->time_shift = rd->shift; 1363 userpg->time_zero = rd->epoch_ns; 1364 userpg->time_cycles = rd->epoch_cyc; 1365 userpg->time_mask = rd->sched_clock_mask; 1366 1367 /* 1368 * Subtract the cycle base, such that software that 1369 * doesn't know about cap_user_time_short still 'works' 1370 * assuming no wraps. 1371 */ 1372 ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift); 1373 userpg->time_zero -= ns; 1374 1375 } while (sched_clock_read_retry(seq)); 1376 1377 userpg->time_offset = userpg->time_zero - now; 1378 1379 /* 1380 * time_shift is not expected to be greater than 31 due to 1381 * the original published conversion algorithm shifting a 1382 * 32-bit value (now specifies a 64-bit value) - refer 1383 * perf_event_mmap_page documentation in perf_event.h. 1384 */ 1385 if (userpg->time_shift == 32) { 1386 userpg->time_shift = 31; 1387 userpg->time_mult >>= 1; 1388 } 1389 1390 /* 1391 * Internal timekeeping for enabled/running/stopped times 1392 * is always computed with the sched_clock. 1393 */ 1394 userpg->cap_user_time = 1; 1395 userpg->cap_user_time_zero = 1; 1396 userpg->cap_user_time_short = 1; 1397 } 1398