/*
 * QEMU RISC-V CPU -- internal functions and types
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H

#include "hw/registerfields.h"

/*
 * The current MMU modes are:
 *  - U                 0b000
 *  - S                 0b001
 *  - S+SUM             0b010
 *  - M                 0b011
 *  - U+2STAGE          0b100
 *  - S+2STAGE          0b101
 *  - S+SUM+2STAGE      0b110
 */
#define MMUIdx_U            0
#define MMUIdx_S            1
#define MMUIdx_S_SUM        2
#define MMUIdx_M            3
#define MMU_2STAGE_BIT      (1 << 2)

static inline int mmuidx_priv(int mmu_idx)
{
    int ret = mmu_idx & 3;
    if (ret == MMUIdx_S_SUM) {
        ret = PRV_S;
    }
    return ret;
}

static inline bool mmuidx_sum(int mmu_idx)
{
    return (mmu_idx & 3) == MMUIdx_S_SUM;
}
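
/*
 * Worked example: an S-mode access with SUM set under two-stage
 * translation is encoded as
 *
 *     int mmu_idx = MMUIdx_S_SUM | MMU_2STAGE_BIT;    (== 0b110)
 *
 * mmuidx_priv(mmu_idx) then returns PRV_S, since the SUM variant is
 * still S-mode for privilege checks, and mmuidx_sum(mmu_idx) is true.
 */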

/* share data between vector helpers and decode code */
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
FIELD(VDATA, VTA, 4, 1)
FIELD(VDATA, VTA_ALL_1S, 5, 1)
FIELD(VDATA, VMA, 6, 1)
FIELD(VDATA, NF, 7, 4)
FIELD(VDATA, WD, 7, 1)
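
/*
 * Sketch of the usual flow: decode packs the fields into the 32-bit
 * simd descriptor data and the vector helpers unpack them, using the
 * FIELD_DP32/FIELD_EX32 accessors from hw/registerfields.h. NF and WD
 * can share bit 7 because no instruction uses both. The names a->vm
 * and desc below stand in for the decode argument struct and helper
 * descriptor:
 *
 *     uint32_t data = 0;
 *     data = FIELD_DP32(data, VDATA, VM, a->vm);            -- decode
 *     ...
 *     bool vm = FIELD_EX32(simd_data(desc), VDATA, VM);     -- helper
 */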

/* floating-point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);
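
/*
 * Each helper returns the fclass.{h,s,d} result mask defined by the
 * RISC-V F/D/Zfh extensions: bit 0 = -inf, 1 = negative normal,
 * 2 = negative subnormal, 3 = -0, 4 = +0, 5 = positive subnormal,
 * 6 = positive normal, 7 = +inf, 8 = signaling NaN, 9 = quiet NaN.
 */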

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_riscv_cpu;
#endif

enum {
    RISCV_FRM_RNE = 0,  /* Round to Nearest, ties to Even */
    RISCV_FRM_RTZ = 1,  /* Round towards Zero */
    RISCV_FRM_RDN = 2,  /* Round Down */
    RISCV_FRM_RUP = 3,  /* Round Up */
    RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
    RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
    RISCV_FRM_ROD = 8,  /* Round to Odd */
};
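
/*
 * Sketch of how these typically map onto QEMU's softfloat rounding
 * modes (the actual conversion lives in the FP helpers; DYN means
 * "take the mode from the frm CSR", and ROD is not encodable in frm,
 * it is only used internally for round-to-odd narrowing):
 *
 *     RISCV_FRM_RNE -> float_round_nearest_even
 *     RISCV_FRM_RTZ -> float_round_to_zero
 *     RISCV_FRM_RDN -> float_round_down
 *     RISCV_FRM_RUP -> float_round_up
 *     RISCV_FRM_RMM -> float_round_ties_away
 *     RISCV_FRM_ROD -> float_round_to_odd
 */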

static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
{
    /* the value is sign-extended instead of NaN-boxed for zfinx */
    if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return (int32_t)f;
    } else {
        return f | MAKE_64BIT_MASK(32, 32);
    }
}

static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check does not apply when zfinx is enabled */
    if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return (uint32_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(32, 32);

    if (likely((f & mask) == mask)) {
        return (uint32_t)f;
    } else {
        return 0x7fc00000u; /* default qNaN */
    }
}
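
/*
 * For example, without zfinx, nanbox_s(env, 0x3f800000) yields
 * 0xffffffff3f800000 (1.0f with the upper 32 bits all ones), while
 * check_nanbox_s() on a value whose upper 32 bits are not all ones
 * returns the single-precision default qNaN 0x7fc00000, as the F
 * extension requires for improperly boxed operands.
 */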

static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
{
    /* the value is sign-extended instead of NaN-boxed for zfinx */
    if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return (int16_t)f;
    } else {
        return f | MAKE_64BIT_MASK(16, 48);
    }
}

static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check does not apply when zfinx is enabled */
    if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return (uint16_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(16, 48);

    if (likely((f & mask) == mask)) {
        return (uint16_t)f;
    } else {
        return 0x7E00u; /* default qNaN */
    }
}
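
/*
 * The half-precision pair mirrors the single-precision one with a
 * 48-bit box: nanbox_h(env, 0x3c00) (1.0 in binary16) yields
 * 0xffffffffffff3c00, and a badly boxed operand is replaced by the
 * binary16 default qNaN 0x7e00.
 */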

#endif /* RISCV_CPU_INTERNALS_H */