/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat-types.h"
#include "qom/object.h"

#define TCG_GUEST_DEFAULT_MO 0

#define TYPE_RISCV_CPU "riscv-cpu"

#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
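
/*
 * For illustration: RISCV_CPU_TYPE_NAME("rv64") expands to the QOM type name
 * "rv64-riscv-cpu".
 */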

#define TYPE_RISCV_CPU_ANY              RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32           RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64           RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_IBEX             RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SIFIVE_E31       RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34       RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51       RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34       RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54       RISCV_CPU_TYPE_NAME("sifive-u54")

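/*
 * Base-ISA width markers kept in the top two bits of a target_ulong, which
 * mirrors the misa.MXL encoding (1 = 32-bit, 2 = 64-bit); RVXLEN below picks
 * the value matching the build's target.
 */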
#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2))
#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2))

#if defined(TARGET_RISCV32)
#define RVXLEN RV32
#elif defined(TARGET_RISCV64)
#define RVXLEN RV64
#endif

#define RV(x) ((target_ulong)1 << (x - 'A'))
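/*
 * Each extension letter maps to its misa bit, e.g. RV('A') is bit 0 and
 * RV('I') is bit 8.
 */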

#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')

/*
 * The S extension denotes that Supervisor mode exists, but it is possible to
 * have a core that supports S mode but does not have an MMU. There is
 * currently no bit in misa to indicate whether an MMU exists or not, so a
 * CPU features bitfield is required; likewise for optional PMP support.
 */
enum {
    RISCV_FEATURE_MMU,
    RISCV_FEATURE_PMP,
    RISCV_FEATURE_MISA
};

#define PRIV_VERSION_1_10_0 0x00011000
#define PRIV_VERSION_1_11_0 0x00011100

#define VEXT_VERSION_0_07_1 0x00000701
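/*
 * These constants appear to pack the decimal digits of major.minor.patch into
 * hex nibbles as 0xMMMMmmpp, e.g. 0x00011100 is privileged spec 1.11.0 and
 * 0x00000701 is vector spec 0.7.1.
 */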

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

typedef struct CPURISCVState CPURISCVState;

#include "pmp.h"

#define RV_VLEN_MAX 256

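/*
 * vtype CSR fields, following the layout of the supported vector spec draft
 * (see VEXT_VERSION_0_07_1 above); FIELD() comes from "hw/registerfields.h".
 */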
FIELD(VTYPE, VLMUL, 0, 2)
FIELD(VTYPE, VSEW, 2, 3)
FIELD(VTYPE, VEDIV, 5, 2)
FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)

struct CPURISCVState {
    target_ulong gpr[32];
    uint64_t fpr[32]; /* assume both F and D extensions */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    target_ulong frm;

    target_ulong badaddr;
    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;
    target_ulong misa;
    target_ulong misa_mask;

    uint32_t features;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU-specific information about the virt state. */
    target_ulong virt;
    target_ulong resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    target_ulong mip;

    uint32_t miclaim;

    target_ulong mie;
    target_ulong mideleg;

    target_ulong sptbr;  /* until: priv-1.9.1 */
    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong sbadaddr;
    target_ulong mbadaddr;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    target_ulong hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    uint64_t htimedelta;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong sscratch;
    target_ulong mscratch;

    /* temporary htif regs */
    uint64_t mfromhost;
    uint64_t mtohost;
    uint64_t timecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;

    /* machine-specific rdtime callback */
    uint64_t (*rdtime_fn)(uint32_t);
    uint32_t rdtime_fn_arg;

    /* True if in debugger mode.  */
    bool debugger;
#endif

    float_status fp_status;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *timer; /* Internal timer */
};

OBJECT_DECLARE_TYPE(RISCVCPU, RISCVCPUClass,
                    RISCV_CPU)

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A RISC-V CPU model.
 */
struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    DeviceReset parent_reset;
};

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISC-V CPU.
 */
struct RISCVCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    /* Configuration Settings */
    struct {
        bool ext_i;
        bool ext_e;
        bool ext_g;
        bool ext_m;
        bool ext_a;
        bool ext_f;
        bool ext_d;
        bool ext_c;
        bool ext_s;
        bool ext_u;
        bool ext_h;
        bool ext_v;
        bool ext_counters;
        bool ext_ifencei;
        bool ext_icsr;

        char *priv_spec;
        char *user_spec;
        char *vext_spec;
        uint16_t vlen;
        uint16_t elen;
        bool mmu;
        bool pmp;
        uint64_t resetvec;
    } cfg;
};
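
/*
 * Note: the cfg flags above are what cpu.c exposes as QOM properties and
 * consults at realize time to assemble the misa bits and feature set; the
 * authoritative property names are those registered in cpu.c.
 */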

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa & ext) != 0;
}

static inline bool riscv_feature(CPURISCVState *env, int feature)
{
    return env->features & (1ULL << feature);
}
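
/*
 * Illustrative checks: riscv_has_ext(env, RVH) tests a misa extension bit,
 * while riscv_feature(env, RISCV_FEATURE_MMU) tests the QEMU-internal
 * features bitfield declared above.
 */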

#include "cpu_user.h"
#include "cpu_bits.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_fpr_regnames[];
extern const char * const riscv_excp_names[];
extern const char * const riscv_intr_names[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env);
void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                    MMUAccessType access_type, int mmu_idx,
                                    uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_signal_handler riscv_cpu_signal_handler
#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
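/*
 * BOOL_TO_MASK turns a boolean into an all-ones or all-zeroes mask, so an
 * interrupt source can raise or clear a bit with something like (illustrative):
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
 */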
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
int riscv_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
                                         uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#define TB_FLAGS_MMU_MASK   7
#define TB_FLAGS_PRIV_MMU_MASK                3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS

typedef CPURISCVState CPUArchState;
typedef RISCVCPU ArchCPU;
#include "exec/cpu-all.h"

FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1)
FIELD(TB_FLAGS, LMUL, 3, 2)
FIELD(TB_FLAGS, SEW, 5, 3)
FIELD(TB_FLAGS, VILL, 8, 1)
/* Is a hypervisor load/store instruction (HLV/HSV) allowed? */
FIELD(TB_FLAGS, HLSX, 9, 1)

/*
 * A simplification for VLMAX
 * = (1 << LMUL) * VLEN / (8 * (1 << SEW))
 * = (VLEN << LMUL) / (8 << SEW)
 * = (VLEN << LMUL) >> (SEW + 3)
 * = VLEN >> (SEW + 3 - LMUL)
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew, lmul;

    sew = FIELD_EX64(vtype, VTYPE, VSEW);
    lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}
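/*
 * Worked example: with cpu->cfg.vlen = 128, VSEW = 0 (SEW = 8 bits) and
 * VLMUL = 0 (LMUL = 1), VLMAX = 128 >> (0 + 3 - 0) = 16 elements.
 */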

static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = 0;

    *pc = env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV)) {
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL,
                    FIELD_EX64(env->vtype, VTYPE, VILL));
        flags = FIELD_DP32(flags, TB_FLAGS, SEW,
                    FIELD_EX64(env->vtype, VTYPE, VSEW));
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                    FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }
    }
#endif

    *pflags = flags;
}

int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value,
                target_ulong new_value, target_ulong write_mask);
int riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value,
                      target_ulong new_value, target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
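/*
 * Illustrative use (CSR numbers and bits such as CSR_MSTATUS and MSTATUS_FS
 * come from "cpu_bits.h"):
 *
 *     target_ulong ms = riscv_csr_read(env, CSR_MSTATUS);
 *     riscv_csr_write(env, CSR_MSTATUS, ms | MSTATUS_FS);
 *
 * Both helpers go through riscv_csrrw(), so the per-CSR predicates and side
 * effects still apply.
 */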

typedef int (*riscv_csr_predicate_fn)(CPURISCVState *env, int csrno);
typedef int (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
    target_ulong *ret_value);
typedef int (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
    target_ulong new_value);
typedef int (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
    target_ulong *ret_value, target_ulong new_value, target_ulong write_mask);

typedef struct {
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
} riscv_csr_operations;
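/*
 * Rough dispatch model (see csr.c for the authoritative logic): riscv_csrrw()
 * first consults predicate, then uses op if one is provided, otherwise falls
 * back to read followed by a masked write.
 */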

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

#endif /* RISCV_CPU_H */