/*
 * RISC-V PMU file.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"

#define RISCV_TIMEBASE_FREQ 1000000000 /* 1 GHz */
#define MAKE_32BIT_MASK(shift, length) \
        (((uint32_t)(~0UL) >> (32 - (length))) << (shift))
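/* e.g. MAKE_32BIT_MASK(3, 4) == 0x78, i.e. bits 3..6 set */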
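/*
 * Counter indices 0-2 are the fixed cycle, time and instret counters, so
 * only mhpmcounter3 and above are programmable, and only when the
 * corresponding bit is set in the mask of available counters.
 */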
static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
{
    return ctr_idx >= 3 && ctr_idx < RV_MAX_MHPMCOUNTERS &&
           (cpu->pmu_avail_ctrs & BIT(ctr_idx));
}

static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;

    return riscv_pmu_counter_valid(cpu, ctr_idx) &&
           !get_field(env->mcountinhibit, BIT(ctr_idx));
}

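/*
 * On RV32, a 64-bit counter is split across mhpmcounter (low 32 bits) and
 * mhpmcounterh (high 32 bits), and the Sscofpmf inhibit and overflow (OF)
 * bits live in the mhpmeventh CSRs. Incrementing therefore has to carry
 * from the low half into the high half by hand.
 */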
static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    target_ulong max_val = UINT32_MAX;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    bool virt_on = riscv_cpu_virt_enabled(env);

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        if (counter->mhpmcounterh_val == max_val) {
            counter->mhpmcounter_val = 0;
            counter->mhpmcounterh_val = 0;
            /* Generate interrupt only if OF bit is clear */
            if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) {
                env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF;
                riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1));
            }
        } else {
            counter->mhpmcounterh_val++;
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

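/* RV64 counterpart: the full 64-bit counter fits in mhpmcounter_val. */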
static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t max_val = UINT64_MAX;
    bool virt_on = riscv_cpu_virt_enabled(env);

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        counter->mhpmcounter_val = 0;
        /* Generate interrupt only if OF bit is clear */
        if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) {
            env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF;
            riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

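/*
 * Increment the counter that is currently mapped to event_idx, if any.
 * Returns -1 when the event is not mapped or its counter is inhibited,
 * and 0 otherwise (including when no PMU counters are configured).
 */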
int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
    uint32_t ctr_idx;
    CPURISCVState *env = &cpu->env;
    gpointer value;

    if (!cpu->cfg.pmu_num) {
        return 0;
    }

    value = g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                GUINT_TO_POINTER(event_idx));
    if (!value) {
        return -1;
    }

    ctr_idx = GPOINTER_TO_UINT(value);
    /* riscv_pmu_counter_enabled() already checks mcountinhibit */
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return -1;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return riscv_pmu_incr_ctr_rv32(cpu, ctr_idx);
    }

    return riscv_pmu_incr_ctr_rv64(cpu, ctr_idx);
}

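/*
 * The two helpers below answer whether a given counter index currently
 * monitors instructions retired or cycles: counter 2 (instret) and
 * counter 0 (cycle) are fixed by the ISA, while a programmable counter
 * qualifies only if the event map points it at the matching hardware event.
 */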
bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
                                        uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed instret counter */
    if (target_ctr == 2) {
        return true;
    }

    cpu = RISCV_CPU(env_cpu(env));
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));

    /* Counter zero is not used for event_ctr_map */
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed mcycle counter */
    if (target_ctr == 0) {
        return true;
    }

    cpu = RISCV_CPU(env_cpu(env));
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));

    /* Counter zero is not used for event_ctr_map */
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

static gboolean pmu_remove_event_map(gpointer key, gpointer value,
                                     gpointer udata)
{
    return GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata);
}

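/*
 * Convert a number of counter ticks to nanoseconds. With icount the
 * conversion is exact; otherwise a fixed 1 GHz timebase is assumed,
 * i.e. one tick per nanosecond.
 */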
static int64_t pmu_icount_ticks_to_ns(int64_t value)
{
    int64_t ret = 0;

    if (icount_enabled()) {
        ret = icount_to_ns(value);
    } else {
        ret = (NANOSECONDS_PER_SECOND / RISCV_TIMEBASE_FREQ) * value;
    }

    return ret;
}

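/*
 * Update the event->counter hash table when the guest writes mhpmevent:
 * a value of zero drops the mapping for ctr_idx, while a supported event
 * index maps that event to ctr_idx (first writer wins if the event is
 * already mapped elsewhere).
 */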
int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
                               uint32_t ctr_idx)
{
    uint32_t event_idx;
    RISCVCPU *cpu = RISCV_CPU(env_cpu(env));

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
        return -1;
    }

    /*
     * A zero mhpmevent value is the reset case: remove any existing
     * mapping for this counter.
     */
    if (!value) {
        g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
                                    pmu_remove_event_map,
                                    GUINT_TO_POINTER(ctr_idx));
        return 0;
    }

    /* Nothing to do if this event is already mapped to a counter */
    event_idx = value & MHPMEVENT_IDX_MASK;
    if (g_hash_table_lookup(cpu->pmu_event_ctr_map,
                            GUINT_TO_POINTER(event_idx))) {
        return 0;
    }

    switch (event_idx) {
    case RISCV_PMU_EVENT_HW_CPU_CYCLES:
    case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
    case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
    case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
    case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
        break;
    default:
        /* We don't support any raw events right now */
        return -1;
    }
    g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx),
                        GUINT_TO_POINTER(ctr_idx));

    return 0;
}

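/*
 * Raise the local counter overflow interrupt (LCOFIP) for a cycle or
 * instret counter whose overflow timer fired. If part of the overflow
 * delta could not be programmed earlier because it exceeded the signed
 * timer range, re-arm the timer for the remainder instead of raising
 * the interrupt now.
 */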
static void pmu_timer_trigger_irq(RISCVCPU *cpu,
                                  enum riscv_pmu_event_idx evt_idx)
{
    uint32_t ctr_idx;
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter;
    target_ulong *mhpmevent_val;
    uint64_t of_bit_mask;
    int64_t irq_trigger_at;

    if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
        evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
        return;
    }

    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(evt_idx)));
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = &env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    counter = &env->pmu_ctrs[ctr_idx];
    if (counter->irq_overflow_left > 0) {
        irq_trigger_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                        counter->irq_overflow_left;
        timer_mod_anticipate_ns(cpu->pmu_timer, irq_trigger_at);
        counter->irq_overflow_left = 0;
        return;
    }

    if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
        /* Generate interrupt only if OF bit is clear */
        if (!(*mhpmevent_val & of_bit_mask)) {
            *mhpmevent_val |= of_bit_mask;
            riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    }
}

/* Timer callback for instret and cycle counter overflow */
void riscv_pmu_timer_cb(void *priv)
{
    RISCVCPU *cpu = priv;

    /* Timer event was triggered only for these events */
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
}

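/*
 * Program cpu->pmu_timer to fire when the counter, starting from "value",
 * would wrap: the number of ticks to overflow is UINT64_MAX - value + 1
 * (saturated to UINT64_MAX when value is 0), clamped to what a signed
 * 64-bit QEMU timer can hold, with any remainder saved in
 * irq_overflow_left.
 */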
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
    uint64_t overflow_delta, overflow_at;
    int64_t overflow_ns, overflow_left = 0;
    RISCVCPU *cpu = RISCV_CPU(env_cpu(env));
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
        return -1;
    }

    if (value) {
        overflow_delta = UINT64_MAX - value + 1;
    } else {
        overflow_delta = UINT64_MAX;
    }

    /*
     * QEMU supports only int64_t timers while RISC-V counters are uint64_t.
     * Compute the leftover and save it so that it can be reprogrammed again
     * when the timer expires.
     */
    if (overflow_delta > INT64_MAX) {
        overflow_left = overflow_delta - INT64_MAX;
    }

    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta);
        overflow_left = pmu_icount_ticks_to_ns(overflow_left);
    } else {
        return -1;
    }
    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;

    if (overflow_at > INT64_MAX) {
        overflow_left += overflow_at - INT64_MAX;
        counter->irq_overflow_left = overflow_left;
        overflow_at = INT64_MAX;
    }
    timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);

    return 0;
}

int riscv_pmu_init(RISCVCPU *cpu, int num_counters)
{
    if (num_counters > (RV_MAX_MHPMCOUNTERS - 3)) {
        return -1;
    }

    cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
    if (!cpu->pmu_event_ctr_map) {
        /* PMU support cannot be enabled */
        qemu_log_mask(LOG_UNIMP, "PMU events can't be supported\n");
        cpu->cfg.pmu_num = 0;
        return -1;
    }

    /* Create a bitmask of available programmable counters */
    cpu->pmu_avail_ctrs = MAKE_32BIT_MASK(3, num_counters);

    return 0;
}