/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"
#include "power8-pmu.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

/*
 * Return whether a "counter negative" (overflow) condition on counter
 * 'sprn' is enabled to raise a Performance Monitor alert.
 *
 * PMC1 has a dedicated condition-enable bit (MMCR0_PMC1CE); all other
 * PMCs share MMCR0_PMCjCE.
 */
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

/*
 * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
 * hflags must subsequently be updated.
 *
 * pmc_ins_cnt / pmc_cyc_cnt are bitmasks where bit N means "PMCN is
 * currently counting instructions / cycles".  They are consumed by
 * pmu_increment_insns() and pmu_update_cycles() so those hot paths do
 * not have to re-decode MMCR0/MMCR1 on every call.
 */
static void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    /* MMCR0_FC freezes all counters: leave both summaries empty. */
    if (mmcr0 & MMCR0_FC) {
        goto out;
    }

    /* PMC1-4 are programmable; only scanned if not frozen (MMCR0_FC14). */
    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        /*
         * For each programmable PMC, classify its MMCR1 event select:
         * 0x02 counts instructions and 0x1e counts cycles on any PMC;
         * 0xfe/0xf0 are PMC1-only instruction/cycle selects, and 0xfa
         * is a PMC4-only instruction-counting select (see also the
         * run-latch handling in pmu_increment_insns()).
         */
        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    /*
     * PMC5 (instructions) and PMC6 (cycles) are fixed-function and count
     * whenever they are not frozen by MMCR0_FC56.
     */
    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 out:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
}

/*
 * Must be called after every store that changes MMCR0 or MMCR1:
 * refreshes the counting summaries, recomputes the PMU-related TCG
 * hflags, and (re)asserts or clears the performance monitor interrupt
 * line according to MMCR0_PMAO.
 */
void pmu_mmcr01_updated(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    pmu_update_summaries(env);
    hreg_update_pmu_hflags(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO) {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0);
    }

    /*
     * Should this update overflow timers (if mmcr0 is updated) so they
     * get set in cpu_post_load?
     */
}

/*
 * Add 'num_insns' to every PMC currently counting instructions
 * (per env->pmc_ins_cnt).
 *
 * Counters saturate at PMC_COUNTER_NEGATIVE_VAL when their overflow
 * condition is enabled in MMCR0, so the overflow state is observable
 * by the guest.  Returns true if any counter overflowed, in which case
 * the caller must raise a performance monitor alert.
 */
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    unsigned ins_cnt = env->pmc_ins_cnt;
    bool overflow_triggered = false;
    target_ulong tmp;

    if (ins_cnt & (1 << 1)) {
        tmp = env->spr[SPR_POWER_PMC1];
        tmp += num_insns;
        /* PMC1 uses its dedicated condition-enable bit */
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC1] = tmp;
    }

    if (ins_cnt & (1 << 2)) {
        tmp = env->spr[SPR_POWER_PMC2];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC2] = tmp;
    }

    if (ins_cnt & (1 << 3)) {
        tmp = env->spr[SPR_POWER_PMC3];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC3] = tmp;
    }

    if (ins_cnt & (1 << 4)) {
        target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
        int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        /*
         * PMC4 event 0xfa only counts while the thread's run latch
         * (CTRL_RUN) is set; event 0x02 counts unconditionally.
         */
        if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
            tmp = env->spr[SPR_POWER_PMC4];
            tmp += num_insns;
            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
                tmp = PMC_COUNTER_NEGATIVE_VAL;
                overflow_triggered = true;
            }
            env->spr[SPR_POWER_PMC4] = tmp;
        }
    }

    /* PMC5 is the fixed-function instructions-completed counter */
    if (ins_cnt & (1 << 5)) {
        tmp = env->spr[SPR_POWER_PMC5];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC5] = tmp;
    }

    return overflow_triggered;
}

/*
 * Fold the elapsed virtual time since pmu_base_time into every PMC
 * that is currently counting cycles (per env->pmc_cyc_cnt), then
 * rebase pmu_base_time to now.
 *
 * Must be called BEFORE any change to MMCR0/MMCR1/PMCn state so the
 * counters are credited under the old configuration.
 */
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn, cyc_cnt = env->pmc_cyc_cnt;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        /* pmc_cyc_cnt bit N corresponds to PMCN, hence the +1 */
        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
            /*
             * The pseries and powernv clock runs at 1Ghz, meaning
             * that 1 nanosec equals 1 cycle.
             */
            env->spr[sprn] += time_delta;
        }
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 *
 * Returns NULL for PMC5: cpu_ppc_pmu_init() never allocates a timer
 * for it (PMC5 counts instructions only, so it cannot overflow on a
 * cycle timer).
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

/*
 * (Re)arm or cancel the cycle-overflow timer for counter 'sprn',
 * based on whether it is currently cycle-counting and has its
 * overflow condition enabled.  The deadline is expressed relative to
 * pmu_base_time, so pmu_update_cycles() must have run first.
 */
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    /* Already at/past the negative threshold: fire as soon as possible */
    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        /* 1 cycle == 1 ns here, so the remaining count is the ns timeout */
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}

/*
 * Scroll through all PMCs and start counter overflow timers for
 * PM_CYC events, if needed.
 */
static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

/*
 * Cancel every pending cycle-overflow timer.  Used when the whole PMU
 * is frozen (MMCR0_FC via MMCR0_FCECE); the timers are re-armed when
 * counting resumes.
 */
static void pmu_delete_timers(CPUPPCState *env)
{
    QEMUTimer *pmc_overflow_timer;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);

        if (pmc_overflow_timer) {
            timer_del(pmc_overflow_timer);
        }
    }
}

/*
 * TCG helper for mtspr MMCR0: credit elapsed cycles under the old
 * configuration, store the new value, then refresh summaries, hflags,
 * the PERFM interrupt line and the overflow timers.
 */
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    pmu_mmcr01_updated(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

/*
 * TCG helper for mtspr MMCR1.  Note: unlike helper_store_mmcr0() this
 * does not re-arm overflow timers here; MMCR1 selects events but the
 * freeze/enable bits live in MMCR0.
 */
void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    pmu_mmcr01_updated(env);
}

/*
 * TCG helper for mfspr on a PMC: bring cycle counters up to date so
 * the guest reads a current value.
 */
target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

/*
 * TCG helper for mtspr on a PMC.  PMCs are 32-bit registers, hence
 * the truncation of the 64-bit source value.  The counter's overflow
 * timer is re-armed to match its new contents.
 */
void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = (uint32_t)value;

    pmc_update_overflow_timer(env, sprn);
}

/*
 * Deliver a Performance Monitor alert: handle freeze-on-enabled-
 * condition (MMCR0_FCECE), convert PMAE into PMAO + interrupt, and
 * raise the event-based branch exception.
 */
static void perfm_alert(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        /* Freeze-on-condition: stop all counters */
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires summaries and hflags update */
        pmu_mmcr01_updated(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        /* These MMCR0 bits do not require summaries or hflags update */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    }

    raise_ebb_perfm_exception(env);
}

/*
 * TCG helper called when PMC5 (instruction counting) wraps: pin the
 * counter at the negative threshold and deliver the alert.
 */
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    perfm_alert(env_archcpu(env));
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;

    overflow_triggered = pmu_increment_insns(env, num_insns);
    if (overflow_triggered) {
        perfm_alert(env_archcpu(env));
    }
}

/* Cycle-overflow QEMUTimer callback: opaque is the owning PowerPCCPU. */
static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    perfm_alert(cpu);
}

/*
 * One-time PMU setup for a vCPU: allocate a virtual-clock overflow
 * timer per PMC, except PMC5 whose slot is intentionally left NULL
 * (it never cycle-counts; see get_cyc_overflow_timer()).
 */
void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */