/*
 * RISC-V PMU file.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/device_tree.h"

#define RISCV_TIMEBASE_FREQ 1000000000 /* 1 GHz */

/*
 * To keep it simple, any event can be mapped to any programmable counter in
 * QEMU. The generic cycle & instruction count events can also be monitored
 * using programmable counters. In that case, mcycle & minstret must continue
 * to provide the correct value as well. Heterogeneous PMU per hart is not
 * supported yet. Thus, the number of counters is the same across all harts.
 */
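
/*
 * For example (behaviour of riscv_pmu_update_event_map() below): a guest
 * writing event code 0x01 (SBI_PMU_HW_CPU_CYCLES) to mhpmevent3 maps the
 * cycle event to programmable counter 3, while the fixed mcycle counter
 * keeps counting cycles independently.
 */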
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
{
    uint32_t fdt_event_ctr_map[15] = {};

    /*
     * The event encoding is specified in the SBI specification.
     * Event idx is a 20-bit wide number encoded as follows:
     * event_idx[19:16] = type
     * event_idx[15:0] = code
     * The code field of cache events is encoded as follows:
     * event_idx.code[15:3] = cache_id
     * event_idx.code[2:1] = op_id
     * event_idx.code[0:0] = result_id
     */
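
    /*
     * Worked example (derived from the encoding above): 0x0001001B decodes
     * to type = 0x1 (cache event) and code = 0x001B = 0b11011, i.e.
     * cache_id = 0b11 (0x03, DTLB), op_id = 0b01 (write) and
     * result_id = 0b1 (miss) -- the DTLB write-miss event used below.
     */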

    /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
    fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);

    /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
    fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[8] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[11] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[14] = cpu_to_be32(cmask);

    /* This is an OpenSBI-specific DT property documented in the OpenSBI docs */
    qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters",
                     fdt_event_ctr_map, sizeof(fdt_event_ctr_map));
}
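
/*
 * For illustration only: with cmask = 0x18 (counters 3 and 4 available),
 * the property above would read
 *   riscv,event-to-mhpmcounters = <0x01 0x01 0x19>, <0x02 0x02 0x1c>,
 *                                 <0x10019 0x10019 0x18>, ...;
 * i.e. triplets of (first event idx, last event idx, counter bitmap).
 */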

static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
{
    if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS ||
        !(cpu->pmu_avail_ctrs & BIT(ctr_idx))) {
        return false;
    } else {
        return true;
    }
}

static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;

    if (riscv_pmu_counter_valid(cpu, ctr_idx) &&
        !get_field(env->mcountinhibit, BIT(ctr_idx))) {
        return true;
    } else {
        return false;
    }
}

static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    target_ulong max_val = UINT32_MAX;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        if (counter->mhpmcounterh_val == max_val) {
            counter->mhpmcounter_val = 0;
            counter->mhpmcounterh_val = 0;
            /* Generate interrupt only if OF bit is clear */
            if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) {
                env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF;
                riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
            }
        } else {
            counter->mhpmcounterh_val++;
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t max_val = UINT64_MAX;
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        counter->mhpmcounter_val = 0;
        /* Generate interrupt only if OF bit is clear */
        if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) {
            env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF;
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    } else {
        counter->mhpmcounter_val++;
    }
    return 0;
}

/*
 * Information needed to update counters:
 * new_priv, new_virt: To correctly save starting snapshot for the newly
 *                     started mode. Look at the array being indexed with
 *                     newpriv.
 * old_priv, old_virt: To correctly select previous snapshot for old priv
 *                     and compute delta. Also to select correct counter
 *                     to inc. Look at arrays being indexed with env->priv.
 *
 * To keep the calling convention simple, we assume that env->priv and
 * env->virt_enabled still hold the old priv and old virt values, while the
 * new priv and new virt values are passed in as arguments.
 */
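/*
 * Illustrative walk-through (no additional behaviour): on a U-mode to
 * M-mode trap with virtualization off, delta = now - counter_prev[PRV_U]
 * is credited to counter[PRV_U], and counter_prev[PRV_M] is set to now so
 * that the next switch out of M-mode only charges time spent in M.
 */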
static void riscv_pmu_icount_update_priv(CPURISCVState *env,
                                         target_ulong newpriv, bool new_virt)
{
    uint64_t *snapshot_prev, *snapshot_new;
    uint64_t current_icount;
    uint64_t *counter_arr;
    uint64_t delta;

    if (icount_enabled()) {
        current_icount = icount_get_raw();
    } else {
        current_icount = cpu_get_host_ticks();
    }

    if (env->virt_enabled) {
        g_assert(env->priv <= PRV_S);
        counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
        snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
    } else {
        counter_arr = env->pmu_fixed_ctrs[1].counter;
        snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
    }

    if (new_virt) {
        g_assert(newpriv <= PRV_S);
        snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
    } else {
        snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
    }

    /*
     * new_priv can be the same as env->priv, so the delta must be computed
     * before updating snapshot_new[new_priv].
     */
    delta = current_icount - snapshot_prev[env->priv];
    snapshot_new[newpriv] = current_icount;

    counter_arr[env->priv] += delta;
}

static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
                                        target_ulong newpriv, bool new_virt)
{
    uint64_t *snapshot_prev, *snapshot_new;
    uint64_t current_ticks;
    uint64_t *counter_arr;
    uint64_t delta;

    if (icount_enabled()) {
        current_ticks = icount_get();
    } else {
        current_ticks = cpu_get_host_ticks();
    }

    if (env->virt_enabled) {
        g_assert(env->priv <= PRV_S);
        counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
        snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
    } else {
        counter_arr = env->pmu_fixed_ctrs[0].counter;
        snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
    }

    if (new_virt) {
        g_assert(newpriv <= PRV_S);
        snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
    } else {
        snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
    }

    delta = current_ticks - snapshot_prev[env->priv];
    snapshot_new[newpriv] = current_ticks;

    counter_arr[env->priv] += delta;
}

void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
                                 bool new_virt)
{
    riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
    riscv_pmu_icount_update_priv(env, newpriv, new_virt);
}

int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
    uint32_t ctr_idx;
    int ret;
    CPURISCVState *env = &cpu->env;
    gpointer value;

    if (!cpu->cfg.pmu_mask) {
        return 0;
    }
    value = g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                GUINT_TO_POINTER(event_idx));
    if (!value) {
        return -1;
    }

    ctr_idx = GPOINTER_TO_UINT(value);
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return -1;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        ret = riscv_pmu_incr_ctr_rv32(cpu, ctr_idx);
    } else {
        ret = riscv_pmu_incr_ctr_rv64(cpu, ctr_idx);
    }

    return ret;
}
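
/*
 * A sketch of expected usage (callers live outside this file, e.g. on a
 * TLB-miss path; the event names are from the switch further below):
 *
 *     riscv_pmu_incr_ctr(cpu, RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS);
 *
 * bumps whichever programmable counter the guest has mapped to that
 * event, if any.
 */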

bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
                                        uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed instret counter */
    if (target_ctr == 2) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed mcycle counter */
    if (target_ctr == 0) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));

    /* Counter zero is not used for event_ctr_map */
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

static gboolean pmu_remove_event_map(gpointer key, gpointer value,
                                     gpointer udata)
{
    return GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata);
}

static int64_t pmu_icount_ticks_to_ns(int64_t value)
{
    int64_t ret = 0;

    if (icount_enabled()) {
        ret = icount_to_ns(value);
    } else {
        ret = (NANOSECONDS_PER_SECOND / RISCV_TIMEBASE_FREQ) * value;
    }

    return ret;
}
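
/*
 * Example arithmetic (follows directly from the definitions above): with
 * RISCV_TIMEBASE_FREQ at 1 GHz and icount disabled, one tick maps to one
 * nanosecond, so pmu_icount_ticks_to_ns(5000) == 5000.
 */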

int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
                               uint32_t ctr_idx)
{
    uint32_t event_idx;
    RISCVCPU *cpu = env_archcpu(env);

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
        return -1;
    }

    /*
     * The expected mhpmevent value is zero for the reset case. Remove the
     * current mapping.
     */
    if (!value) {
        g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
                                    pmu_remove_event_map,
                                    GUINT_TO_POINTER(ctr_idx));
        return 0;
    }

    event_idx = value & MHPMEVENT_IDX_MASK;
    if (g_hash_table_lookup(cpu->pmu_event_ctr_map,
                            GUINT_TO_POINTER(event_idx))) {
        return 0;
    }

    switch (event_idx) {
    case RISCV_PMU_EVENT_HW_CPU_CYCLES:
    case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
    case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
    case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
    case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
        break;
    default:
        /* We don't support any raw events right now */
        return -1;
    }
    g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx),
                        GUINT_TO_POINTER(ctr_idx));

    return 0;
}

static bool pmu_hpmevent_is_of_set(CPURISCVState *env, uint32_t ctr_idx)
{
    target_ulong mhpmevent_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    return get_field(mhpmevent_val, of_bit_mask);
}

static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
{
    target_ulong *mhpmevent_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = &env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    if (!get_field(*mhpmevent_val, of_bit_mask)) {
        *mhpmevent_val |= of_bit_mask;
        return true;
    }

    return false;
}
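
/*
 * Note on semantics (as implemented by the callers below): the LCOF
 * interrupt is raised only when the OF bit transitions from clear to set;
 * once OF is set, further overflows of the same counter stay silent until
 * software clears the bit. The two helpers above keep that check in one
 * place.
 */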

static void pmu_timer_trigger_irq(RISCVCPU *cpu,
                                  enum riscv_pmu_event_idx evt_idx)
{
    uint32_t ctr_idx;
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter;
    int64_t irq_trigger_at;
    uint64_t curr_ctr_val, curr_ctrh_val;
    uint64_t ctr_val;

    if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
        evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
        return;
    }

    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(evt_idx)));
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return;
    }

    /* Generate interrupt only if OF bit is clear */
    if (pmu_hpmevent_is_of_set(env, ctr_idx)) {
        return;
    }

    counter = &env->pmu_ctrs[ctr_idx];
    if (counter->irq_overflow_left > 0) {
        irq_trigger_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                         counter->irq_overflow_left;
        timer_mod_anticipate_ns(cpu->pmu_timer, irq_trigger_at);
        counter->irq_overflow_left = 0;
        return;
    }

    riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx);
    ctr_val = counter->mhpmcounter_val;
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx);
        curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32);
        ctr_val = ctr_val | ((uint64_t)counter->mhpmcounterh_val << 32);
    }

    /*
     * We cannot account for inhibited modes when setting up the timer.
     * Check whether the counter has actually overflowed by comparing the
     * current counter value (which does account for inhibited modes) with
     * the software-written counter value.
     */
    if (curr_ctr_val >= ctr_val) {
        riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx);
        return;
    }

    if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
        if (pmu_hpmevent_set_of_if_clear(env, ctr_idx)) {
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    }
}

/* Timer callback for instret and cycle counter overflow */
void riscv_pmu_timer_cb(void *priv)
{
    RISCVCPU *cpu = priv;

    /* Timer event was triggered only for these events */
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
}

int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
    uint64_t overflow_delta, overflow_at, curr_ns;
    int64_t overflow_ns, overflow_left = 0;
    RISCVCPU *cpu = env_archcpu(env);
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];

    /* No timer needed if LCOFI cannot fire: no Sscofpmf or OF already set */
    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf ||
        pmu_hpmevent_is_of_set(env, ctr_idx)) {
        return -1;
    }

    if (value) {
        overflow_delta = UINT64_MAX - value + 1;
    } else {
        overflow_delta = UINT64_MAX;
    }

    /*
     * QEMU supports only int64_t timers while RISC-V counters are uint64_t.
     * Compute the leftover and save it so that it can be reprogrammed again
     * when the timer expires.
     */
    if (overflow_delta > INT64_MAX) {
        overflow_left = overflow_delta - INT64_MAX;
    }

    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta);
        overflow_left = pmu_icount_ticks_to_ns(overflow_left);
    } else {
        return -1;
    }
    curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    overflow_at = curr_ns + overflow_ns;
    if (overflow_at <= curr_ns) {
        overflow_at = UINT64_MAX;
    }

    if (overflow_at > INT64_MAX) {
        overflow_left += overflow_at - INT64_MAX;
        counter->irq_overflow_left = overflow_left;
        overflow_at = INT64_MAX;
    }
    timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);

    return 0;
}
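
/*
 * Worked example (illustrative numbers only): if software writes
 * UINT64_MAX - 999 to the counter, overflow_delta is 1000 ticks; at the
 * 1 GHz timebase with icount disabled that is 1000 ns, so the overflow
 * timer fires 1000 ns into the QEMU_CLOCK_VIRTUAL future.
 */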

void riscv_pmu_init(RISCVCPU *cpu, Error **errp)
{
    if (cpu->cfg.pmu_mask & (COUNTEREN_CY | COUNTEREN_TM | COUNTEREN_IR)) {
        error_setg(errp, "\"pmu-mask\" contains invalid bits (0-2) set");
        return;
    }

    if (ctpop32(cpu->cfg.pmu_mask) > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
    if (!cpu->pmu_event_ctr_map) {
        error_setg(errp, "Unable to allocate PMU event hash table");
        return;
    }

    cpu->pmu_avail_ctrs = cpu->cfg.pmu_mask;
}