// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance event support - PPC 8xx
 *
 * Copyright 2016 Christophe Leroy, CS Systemes d'Information
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>

/* Internal event ids for the four events this PMU supports. */
#define PERF_8xx_ID_CPU_CYCLES		1
#define PERF_8xx_ID_HW_INSTRUCTIONS	2
#define PERF_8xx_ID_ITLB_LOAD_MISS	3
#define PERF_8xx_ID_DTLB_LOAD_MISS	4

/*
 * HW_CACHE config encoding: cache id | (op << 8) | (result << 16),
 * matching the perf_event ABI layout for PERF_TYPE_HW_CACHE configs.
 */
#define C(x)	PERF_COUNT_HW_CACHE_##x
#define DTLB_LOAD_MISS	(C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
#define ITLB_LOAD_MISS	(C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))

/* Counters maintained by the TLB miss handlers (defined in assembly). */
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;

/*
 * Reference counts of active events per type; counting hardware/patched
 * code is enabled on 0 -> 1 and disabled on 1 -> 0.
 */
static atomic_t insn_ctr_ref;
static atomic_t itlb_miss_ref;
static atomic_t dtlb_miss_ref;

/*
 * Return a consistent 48-bit snapshot of the instruction counter:
 * upper 32 bits live in the software atomic 'instruction_counter',
 * lower 16 bits in the top half of SPRN_COUNTA.  Retry if the upper
 * part changed while we were reading the SPR.
 */
static s64 get_insn_ctr(void)
{
	int ctr;
	unsigned long counta;

	do {
		ctr = atomic_read(&instruction_counter);
		counta = mfspr(SPRN_COUNTA);
	} while (ctr != atomic_read(&instruction_counter));

	return ((s64)ctr << 16) | (counta >> 16);
}

/*
 * Map a perf_event attr to one of the PERF_8xx_ID_* ids.
 * Returns -ENOENT for event types this PMU does not handle at all,
 * -EOPNOTSUPP for known types with an unsupported config.
 */
static int event_type(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
			return PERF_8xx_ID_CPU_CYCLES;
		if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
			return PERF_8xx_ID_HW_INSTRUCTIONS;
		break;
	case PERF_TYPE_HW_CACHE:
		if (event->attr.config == ITLB_LOAD_MISS)
			return PERF_8xx_ID_ITLB_LOAD_MISS;
		if (event->attr.config == DTLB_LOAD_MISS)
			return PERF_8xx_ID_DTLB_LOAD_MISS;
		break;
	case PERF_TYPE_RAW:
		break;
	default:
		return -ENOENT;
	}
	return -EOPNOTSUPP;
}

/* pmu::event_init - just validate that the event is one we support. */
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	if (type < 0)
		return type;
	return 0;
}

/*
 * pmu::add - start counting for the event.  On the first user of a
 * given counter, enable the hardware (ICTRL) or patch the TLB miss
 * exit paths to branch into the counting code.  The current counter
 * value is recorded in hw.prev_count as the starting point.
 */
static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
	int type = event_type(event);
	s64 val = 0;

	if (type < 0)
		return type;

	switch (type) {
	case PERF_8xx_ID_CPU_CYCLES:
		val = get_tb();
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		/* 0xc0080007: enables instruction counting - NOTE(review):
		 * magic value, see the MPC8xx manual's ICTRL description. */
		if (atomic_inc_return(&insn_ctr_ref) == 1)
			mtspr(SPRN_ICTRL, 0xc0080007);
		val = get_insn_ctr();
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		if (atomic_inc_return(&itlb_miss_ref) == 1) {
			unsigned long target = patch_site_addr(&patch__itlbmiss_perf);

			patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
#ifndef CONFIG_PIN_TLB_TEXT
			patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
#endif
		}
		val = itlb_miss_counter;
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		if (atomic_inc_return(&dtlb_miss_ref) == 1) {
			unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);

			patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
			patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
			patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
		}
		val = dtlb_miss_counter;
		break;
	}
	local64_set(&event->hw.prev_count, val);
	return 0;
}

/*
 * pmu::read - accumulate the delta since the last read into
 * event->count.  The cmpxchg loop makes the read-and-update of
 * prev_count atomic with respect to concurrent readers.
 */
static void mpc8xx_pmu_read(struct perf_event *event)
{
	int type = event_type(event);
	s64 prev, val = 0, delta = 0;

	if (type < 0)
		return;

	do {
		prev = local64_read(&event->hw.prev_count);
		switch (type) {
		case PERF_8xx_ID_CPU_CYCLES:
			val = get_tb();
			/* NOTE(review): scale factor of 16 presumably reflects
			 * the timebase advancing once per 16 CPU cycles on
			 * this platform - confirm against the 8xx manual. */
			delta = 16 * (val - prev);
			break;
		case PERF_8xx_ID_HW_INSTRUCTIONS:
			/*
			 * The instruction counter counts down, hence
			 * prev - val; compensate for wraparound of the
			 * 48-bit value (see get_insn_ctr()).
			 */
			val = get_insn_ctr();
			delta = prev - val;
			if (delta < 0)
				delta += 0x1000000000000LL;
			break;
		case PERF_8xx_ID_ITLB_LOAD_MISS:
			/* 32-bit counter; compute delta modulo 2^32. */
			val = itlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		case PERF_8xx_ID_DTLB_LOAD_MISS:
			/* 32-bit counter; compute delta modulo 2^32. */
			val = dtlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
}

/*
 * pmu::del - final read of the event, then, if this was the last user
 * of the counter, disable it: restore ICTRL, or patch the original
 * "mfspr r10, SPRN_SPRG_SCRATCH0" instruction back over the branches
 * installed by mpc8xx_pmu_add().
 */
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
	/* mfspr r10, SPRN_SPRG_SCRATCH0 */
	unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
			    __PPC_SPR(SPRN_SPRG_SCRATCH0);

	mpc8xx_pmu_read(event);

	/* If it was the last user, stop counting to avoid useless overhead */
	switch (event_type(event)) {
	case PERF_8xx_ID_CPU_CYCLES:
		/* Timebase always runs; nothing to disable. */
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		if (atomic_dec_return(&insn_ctr_ref) == 0)
			mtspr(SPRN_ICTRL, 7);
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		if (atomic_dec_return(&itlb_miss_ref) == 0) {
			patch_instruction_site(&patch__itlbmiss_exit_1, insn);
#ifndef CONFIG_PIN_TLB_TEXT
			patch_instruction_site(&patch__itlbmiss_exit_2, insn);
#endif
		}
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		if (atomic_dec_return(&dtlb_miss_ref) == 0) {
			patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
			patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
			patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
		}
		break;
	}
}

static struct pmu mpc8xx_pmu = {
	.event_init	= mpc8xx_pmu_event_init,
	.add		= mpc8xx_pmu_add,
	.del		= mpc8xx_pmu_del,
	.read		= mpc8xx_pmu_read,
	/* No counter overflow interrupt on this hardware: no sampling. */
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT |
			  PERF_PMU_CAP_NO_NMI,
};

/* Put the counters in a known idle state, then register the PMU. */
static int init_mpc8xx_pmu(void)
{
	mtspr(SPRN_ICTRL, 7);
	mtspr(SPRN_CMPA, 0);
	mtspr(SPRN_COUNTA, 0xffff);

	return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);