/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "power8-pmu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL

static bool pmc_is_inactive(CPUPPCState *env, int sprn)
{
    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FC) {
        return true;
    }

    if (sprn < SPR_POWER_PMC5) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_FC14;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_FC56;
}

static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

/*
 * For PMCs 1-4, IBM POWER chips have support for an implementation-
 * dependent event, 0x1E, that enables cycle counting. The Linux kernel
 * makes extensive use of 0x1E, so let's also support it.
 *
 * Likewise, event 0x2 is an implementation-dependent event that IBM
 * POWER chips implement (at least since POWER8) that is equivalent to
 * PM_INST_CMPL. Let's support this event on PMCs 1-4 as well.
 */
static PMUEventType pmc_get_event(CPUPPCState *env, int sprn)
{
    uint8_t mmcr1_evt_extr[] = { MMCR1_PMC1EVT_EXTR, MMCR1_PMC2EVT_EXTR,
                                 MMCR1_PMC3EVT_EXTR, MMCR1_PMC4EVT_EXTR };
    PMUEventType evt_type = PMU_EVENT_INVALID;
    uint8_t pmcsel;
    int i;

    if (pmc_is_inactive(env, sprn)) {
        return PMU_EVENT_INACTIVE;
    }

    if (sprn == SPR_POWER_PMC5) {
        return PMU_EVENT_INSTRUCTIONS;
    }

    if (sprn == SPR_POWER_PMC6) {
        return PMU_EVENT_CYCLES;
    }

    i = sprn - SPR_POWER_PMC1;
    pmcsel = extract64(env->spr[SPR_POWER_MMCR1], mmcr1_evt_extr[i],
                       MMCR1_EVT_SIZE);

    switch (pmcsel) {
    case 0x2:
        evt_type = PMU_EVENT_INSTRUCTIONS;
        break;
    case 0x1E:
        evt_type = PMU_EVENT_CYCLES;
        break;
    case 0xF0:
        /*
         * PMC1SEL = 0xF0 is the architected PowerISA v3.1
         * event that counts cycles using PMC1.
         */
        if (sprn == SPR_POWER_PMC1) {
            evt_type = PMU_EVENT_CYCLES;
        }
        break;
    case 0xFA:
        /*
         * PMC4SEL = 0xFA is the "instructions completed
         * with run latch set" event.
         */
        if (sprn == SPR_POWER_PMC4) {
            evt_type = PMU_EVENT_INSN_RUN_LATCH;
        }
        break;
    case 0xFE:
        /*
         * PMC1SEL = 0xFE is the architected PowerISA v3.1
         * event to sample instructions using PMC1.
         */
        if (sprn == SPR_POWER_PMC1) {
            evt_type = PMU_EVENT_INSTRUCTIONS;
        }
        break;
    default:
        break;
    }

    return evt_type;
}
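
/*
 * A minimal sketch (not part of the original sources) of how an MMCR1
 * value selecting the events above could be built, using QEMU's
 * deposit64(), the inverse of the extract64() call in pmc_get_event():
 *
 *     uint64_t mmcr1 = 0;
 *     // PMC1 counts cycles (implementation-dependent event 0x1E)
 *     mmcr1 = deposit64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE, 0x1E);
 *     // PMC4 counts "instructions completed with run latch set"
 *     mmcr1 = deposit64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE, 0xFA);
 *
 * With this MMCR1 and no freeze bits set in MMCR0, pmc_get_event()
 * returns PMU_EVENT_CYCLES for PMC1 and PMU_EVENT_INSN_RUN_LATCH for
 * PMC4. PMC5 and PMC6 ignore MMCR1 entirely: they always count
 * instructions and cycles, respectively, unless frozen.
 */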

bool pmu_insn_cnt_enabled(CPUPPCState *env)
{
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC5; sprn++) {
        if (pmc_get_event(env, sprn) == PMU_EVENT_INSTRUCTIONS ||
            pmc_get_event(env, sprn) == PMU_EVENT_INSN_RUN_LATCH) {
            return true;
        }
    }

    return false;
}

static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered = false;
    int sprn;

    /* PMC6 never counts instructions */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC5; sprn++) {
        PMUEventType evt_type = pmc_get_event(env, sprn);
        bool insn_event = evt_type == PMU_EVENT_INSTRUCTIONS ||
                          evt_type == PMU_EVENT_INSN_RUN_LATCH;

        if (pmc_is_inactive(env, sprn) || !insn_event) {
            continue;
        }

        if (evt_type == PMU_EVENT_INSTRUCTIONS) {
            env->spr[sprn] += num_insns;
        }

        if (evt_type == PMU_EVENT_INSN_RUN_LATCH &&
            env->spr[SPR_CTRL] & CTRL_RUN) {
            env->spr[sprn] += num_insns;
        }

        if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL &&
            pmc_has_overflow_enabled(env, sprn)) {

            overflow_triggered = true;

            /*
             * The real PMU will always trigger a counter overflow with
             * PMC_COUNTER_NEGATIVE_VAL. We don't have an easy way to
             * do that since we're counting blocks of instructions at
             * the end of each translation block, and we're probably
             * already past this value at this point.
             *
             * Let's write PMC_COUNTER_NEGATIVE_VAL to the overflowed
             * counter to simulate what the real hardware would do.
             */
            env->spr[sprn] = PMC_COUNTER_NEGATIVE_VAL;
        }
    }

    return overflow_triggered;
}

static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (pmc_get_event(env, sprn) != PMU_EVENT_CYCLES) {
            continue;
        }

        /*
         * The pseries and powernv clock runs at 1GHz, meaning
         * that 1 nanosecond equals 1 cycle.
         */
        env->spr[sprn] += time_delta;
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (pmc_get_event(env, sprn) != PMU_EVENT_CYCLES ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}
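
/*
 * Worked example (illustrative): suppose PMC1 is counting cycles,
 * MMCR0_PMC1CE is set and PMC1 = 0x7FFFFF00. The timeout computed
 * above is 0x80000000 - 0x7FFFFF00 = 0x100, so the overflow timer
 * fires 256 nanoseconds of virtual time after pmu_base_time, i.e.
 * 256 cycles at the 1 cycle per nanosecond rate assumed by
 * pmu_update_cycles().
 */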

static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    /*
     * Scroll through all PMCs and start counter overflow timers for
     * PM_CYC events, if needed.
     */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    /* MMCR0 writes can change HFLAGS_PMCCCLEAR and HFLAGS_INSN_CNT */
    hreg_compute_hflags(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    /* MMCR1 writes can change HFLAGS_INSN_CNT */
    hreg_compute_hflags(env);
}

target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = value;

    pmc_update_overflow_timer(env, sprn);
}

static void fire_PMC_interrupt(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_EBE)) {
        return;
    }

    /* PMC interrupt not implemented yet */
    return;
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;
    PowerPCCPU *cpu;

    overflow_triggered = pmu_increment_insns(env, num_insns);

    if (overflow_triggered) {
        cpu = env_archcpu(env);
        fire_PMC_interrupt(cpu);
    }
}

static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    fire_PMC_interrupt(cpu);
}

void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */