xref: /openbmc/qemu/target/ppc/power8-pmu.c (revision 89aafcf2)
/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 *  Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"
#include "power8-pmu.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

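/*
 * Whether counter overflow conditions are enabled for 'sprn': PMC1 has
 * its own enable bit (MMCR0[PMC1CE]), while the remaining PMCs share
 * MMCR0[PMCjCE].
 */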
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

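/*
 * Recompute the cached summaries of which counters are active.
 * env->pmc_ins_cnt and env->pmc_cyc_cnt are bitmaps in which bit N set
 * means PMCN is currently counting instructions or cycles, respectively,
 * given the freeze bits in MMCR0 and the event selectors in MMCR1.
 * HFLAGS_INSN_CNT mirrors whether any instruction counting is active.
 */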
void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    if (mmcr0 & MMCR0_FC) {
        goto hflags_calc;
    }

    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 hflags_calc:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
    env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0);
}

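/*
 * Add 'num_insns' to every PMC that is counting instructions. A counter
 * that reaches PMC_COUNTER_NEGATIVE_VAL with overflow conditions enabled
 * is pinned at that value. Note that PMC4's run-instructions event only
 * counts while the run latch (CTRL[RUN]) is set. Returns true if any
 * counter overflowed.
 */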
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    unsigned ins_cnt = env->pmc_ins_cnt;
    bool overflow_triggered = false;
    target_ulong tmp;

    if (ins_cnt & (1 << 1)) {
        tmp = env->spr[SPR_POWER_PMC1];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC1] = tmp;
    }

    if (ins_cnt & (1 << 2)) {
        tmp = env->spr[SPR_POWER_PMC2];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC2] = tmp;
    }

    if (ins_cnt & (1 << 3)) {
        tmp = env->spr[SPR_POWER_PMC3];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC3] = tmp;
    }

    if (ins_cnt & (1 << 4)) {
        target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
        int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
            tmp = env->spr[SPR_POWER_PMC4];
            tmp += num_insns;
            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
                tmp = PMC_COUNTER_NEGATIVE_VAL;
                overflow_triggered = true;
            }
            env->spr[SPR_POWER_PMC4] = tmp;
        }
    }

    if (ins_cnt & (1 << 5)) {
        tmp = env->spr[SPR_POWER_PMC5];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC5] = tmp;
    }

    return overflow_triggered;
}

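/*
 * Fold the virtual time elapsed since the last update into every PMC
 * that is counting cycles, then reset the base time.
 */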
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn, cyc_cnt = env->pmc_cyc_cnt;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
            /*
             * The pseries and powernv clock runs at 1 GHz, meaning
             * that 1 nanosecond equals 1 cycle.
             */
            env->spr[sprn] += time_delta;
        }
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

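/*
 * Arm or delete the cycle overflow timer of the 'sprn' counter,
 * depending on whether it is counting cycles and has overflow
 * conditions enabled. The timeout is the number of cycles (i.e.
 * nanoseconds) left until the counter goes negative.
 */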
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}

static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    /*
     * Scroll through all PMCs and start counter overflow timers for
     * PM_CYC events, if needed.
     */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

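/* Delete all cycle overflow timers, e.g. when the counters are frozen. */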
static void pmu_delete_timers(CPUPPCState *env)
{
    QEMUTimer *pmc_overflow_timer;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);

        if (pmc_overflow_timer) {
            timer_del(pmc_overflow_timer);
        }
    }
}

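/*
 * MMCR0 write handler: account for the cycles elapsed under the old
 * settings, store the new value, refresh the PMCC and instruction
 * counting hflags plus the cached summaries, and re-arm the cycle
 * overflow timers.
 */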
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
    bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0;

    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */
    env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0);
    env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1);

    pmu_update_summaries(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

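/* MMCR1 write handler: MMCR1 selects the events counted by PMC1-PMC4. */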
void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    /* MMCR1 writes can change HFLAGS_INSN_CNT */
    pmu_update_summaries(env);
}

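/* PMC read handler: bring the cycle counters up to date first. */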
target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

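/*
 * PMC write handler: the counters are 32 bits wide, and a new value
 * changes the distance to overflow, so re-arm the matching timer.
 */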
void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = (uint32_t)value;

    pmc_update_overflow_timer(env, sprn);
}

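/*
 * Raise a performance monitor alert: freeze the counters if MMCR0[FCECE]
 * is set, move the alert enable bit (PMAE) into the alert occurred bit
 * (PMAO), and deliver the event-based branch/perfm exception.
 */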
static void fire_PMC_interrupt(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires a new HFLAGS_INSN_CNT calc */
        pmu_update_summaries(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
    }

    raise_ebb_perfm_exception(env);
}

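/* Pin PMC5 at the overflow threshold and raise the performance monitor alert. */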
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    fire_PMC_interrupt(env_archcpu(env));
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;
    PowerPCCPU *cpu;

    overflow_triggered = pmu_increment_insns(env, num_insns);

    if (overflow_triggered) {
        cpu = env_archcpu(env);
        fire_PMC_interrupt(cpu);
    }
}

static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    fire_PMC_interrupt(cpu);
}

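/*
 * Allocate the per-counter cycle overflow timers. PMC5 only counts
 * instructions, so it gets no timer and its slot stays NULL.
 */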
void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */