/*
 * RISC-V PMU file.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/device_tree.h"

#define RISCV_TIMEBASE_FREQ 1000000000 /* 1 GHz */

/*
 * To keep it simple, any event can be mapped to any programmable counter in
 * QEMU. The generic cycle & instruction count events can also be monitored
 * using programmable counters. In that case, mcycle & minstret must continue
 * to provide the correct value as well. A heterogeneous PMU per hart is not
 * supported yet; thus, the number of counters is the same across all harts.
 */
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
{
    uint32_t fdt_event_ctr_map[15] = {};

    /*
     * The event encoding is specified in the SBI specification.
     * An event idx is a 20-bit wide number encoded as follows:
     * event_idx[19:16] = type
     * event_idx[15:0] = code
     * The code field of cache events is encoded as follows:
     * event_idx.code[15:3] = cache_id
     * event_idx.code[2:1] = op_id
     * event_idx.code[0:0] = result_id
     */

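    /*
     * For example, 0x00010019 below decodes as type 0x01 (cache event),
     * cache_id 0x03 (DTLB), op_id 0x00 (READ), result_id 0x01 (MISS).
     *
     * The cycle and instruction count events can also be delivered by the
     * fixed mcycle (counter 0) and minstret (counter 2) counters, so those
     * bits are ORed into the respective counter masks below.
     */
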
    /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
    fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);

    /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
    fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[8] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[11] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[14] = cpu_to_be32(cmask);

    /* This is an OpenSBI-specific DT property documented in the OpenSBI docs */
    qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters",
                     fdt_event_ctr_map, sizeof(fdt_event_ctr_map));
}
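
/*
 * A machine model is expected to create the pmu node and set its
 * "compatible" property before calling the helper above. A minimal sketch
 * (illustrative only; board code may differ):
 *
 *     qemu_fdt_add_subnode(fdt, "/pmu");
 *     qemu_fdt_setprop_string(fdt, "/pmu", "compatible", "riscv,pmu");
 *     riscv_pmu_generate_fdt_node(fdt, cpu->cfg.pmu_mask, "/pmu");
 */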

static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
{
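    /* Counter indexes 0-2 are the fixed cycle, time and instret counters */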
    if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS ||
        !(cpu->pmu_avail_ctrs & BIT(ctr_idx))) {
        return false;
    } else {
        return true;
    }
}

static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;

    if (riscv_pmu_counter_valid(cpu, ctr_idx) &&
        !get_field(env->mcountinhibit, BIT(ctr_idx))) {
        return true;
    } else {
        return false;
    }
}

static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    target_ulong max_val = UINT32_MAX;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    bool virt_on = env->virt_enabled;

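    /*
     * On RV32 the Sscofpmf inhibit and overflow flags live in mhpmeventh,
     * the upper half of the 64-bit mhpmevent CSR.
     */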
    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        if (counter->mhpmcounterh_val == max_val) {
            counter->mhpmcounter_val = 0;
            counter->mhpmcounterh_val = 0;
            /* Generate interrupt only if OF bit is clear */
            if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) {
                env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF;
                riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
            }
        } else {
            counter->mhpmcounterh_val++;
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t max_val = UINT64_MAX;
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        counter->mhpmcounter_val = 0;
        /* Generate interrupt only if OF bit is clear */
        if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) {
            env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF;
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
    uint32_t ctr_idx;
    int ret;
    CPURISCVState *env = &cpu->env;
    gpointer value;

    if (!cpu->cfg.pmu_mask) {
        return 0;
    }
    value = g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                GUINT_TO_POINTER(event_idx));
    if (!value) {
        return -1;
    }

    ctr_idx = GPOINTER_TO_UINT(value);
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx) ||
        get_field(env->mcountinhibit, BIT(ctr_idx))) {
        return -1;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        ret = riscv_pmu_incr_ctr_rv32(cpu, ctr_idx);
    } else {
        ret = riscv_pmu_incr_ctr_rv64(cpu, ctr_idx);
    }

    return ret;
}

bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
                                        uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed instret counter */
    if (target_ctr == 2) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));
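    /* Counter zero is not used for event_ctr_map */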
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed mcycle counter */
    if (target_ctr == 0) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));

    /* Counter zero is not used for event_ctr_map */
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

static gboolean pmu_remove_event_map(gpointer key, gpointer value,
                                     gpointer udata)
{
    return GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata);
}

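/*
 * Convert a tick count to nanoseconds of virtual time: via icount when it
 * is enabled, otherwise by scaling with the fixed 1 GHz timebase above.
 */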
static int64_t pmu_icount_ticks_to_ns(int64_t value)
{
    int64_t ret = 0;

    if (icount_enabled()) {
        ret = icount_to_ns(value);
    } else {
        ret = (NANOSECONDS_PER_SECOND / RISCV_TIMEBASE_FREQ) * value;
    }

    return ret;
}

int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
                               uint32_t ctr_idx)
{
    uint32_t event_idx;
    RISCVCPU *cpu = env_archcpu(env);

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
        return -1;
    }

    /*
     * The expected mhpmevent value is zero for the reset case. Remove the
     * current mapping.
     */
    if (!value) {
        g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
                                    pmu_remove_event_map,
                                    GUINT_TO_POINTER(ctr_idx));
        return 0;
    }

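    /* If the event is already mapped to a counter, keep the existing mapping */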
    event_idx = value & MHPMEVENT_IDX_MASK;
    if (g_hash_table_lookup(cpu->pmu_event_ctr_map,
                            GUINT_TO_POINTER(event_idx))) {
        return 0;
    }

    switch (event_idx) {
    case RISCV_PMU_EVENT_HW_CPU_CYCLES:
    case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
    case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
    case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
    case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
        break;
    default:
        /* We don't support any raw events right now */
        return -1;
    }
    g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx),
                        GUINT_TO_POINTER(ctr_idx));

    return 0;
}

static void pmu_timer_trigger_irq(RISCVCPU *cpu,
                                  enum riscv_pmu_event_idx evt_idx)
{
    uint32_t ctr_idx;
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter;
    target_ulong *mhpmevent_val;
    uint64_t of_bit_mask;
    int64_t irq_trigger_at;

    if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
        evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
        return;
    }

    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(evt_idx)));
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = &env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

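    /*
     * If the overflow target could not be programmed in a single timer
     * shot, re-arm the timer for the remainder instead of raising the
     * interrupt now.
     */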
    counter = &env->pmu_ctrs[ctr_idx];
    if (counter->irq_overflow_left > 0) {
        irq_trigger_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                         counter->irq_overflow_left;
        timer_mod_anticipate_ns(cpu->pmu_timer, irq_trigger_at);
        counter->irq_overflow_left = 0;
        return;
    }

    if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
        /* Generate interrupt only if OF bit is clear */
        if (!(*mhpmevent_val & of_bit_mask)) {
            *mhpmevent_val |= of_bit_mask;
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    }
}

/* Timer callback for instret and cycle counter overflow */
void riscv_pmu_timer_cb(void *priv)
{
    RISCVCPU *cpu = priv;

    /* The timer event is triggered only for these events */
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
}

int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
    uint64_t overflow_delta, overflow_at;
    int64_t overflow_ns, overflow_left = 0;
    RISCVCPU *cpu = env_archcpu(env);
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
        return -1;
    }

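    /* Number of increments before the 64-bit counter wraps back to zero */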
    if (value) {
        overflow_delta = UINT64_MAX - value + 1;
    } else {
        overflow_delta = UINT64_MAX;
    }

    /*
     * QEMU supports only int64_t timers while RISC-V counters are uint64_t.
     * Compute the leftover and save it so that it can be reprogrammed again
     * when the timer expires.
     */
    if (overflow_delta > INT64_MAX) {
        overflow_left = overflow_delta - INT64_MAX;
    }

    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta);
        overflow_left = pmu_icount_ticks_to_ns(overflow_left);
    } else {
        return -1;
    }
    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  overflow_ns;

    if (overflow_at > INT64_MAX) {
        overflow_left += overflow_at - INT64_MAX;
        counter->irq_overflow_left = overflow_left;
        overflow_at = INT64_MAX;
    }
    timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);

    return 0;
}

void riscv_pmu_init(RISCVCPU *cpu, Error **errp)
{
    if (cpu->cfg.pmu_mask & (COUNTEREN_CY | COUNTEREN_TM | COUNTEREN_IR)) {
        error_setg(errp, "\"pmu-mask\" contains invalid bits (0-2) set");
        return;
    }

    if (ctpop32(cpu->cfg.pmu_mask) > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
    if (!cpu->pmu_event_ctr_map) {
        error_setg(errp, "Unable to allocate PMU event hash table");
        return;
    }

    cpu->pmu_avail_ctrs = cpu->cfg.pmu_mask;
}