/*
 * QEMU RISC-V CPU -- internal functions and types
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H

#include "exec/cpu-common.h"
#include "hw/registerfields.h"
#include "fpu/softfloat-types.h"
#include "target/riscv/cpu_bits.h"

/*
 * The current MMU Modes are:
 * - U                  0b000
 * - S                  0b001
 * - S+SUM              0b010
 * - M                  0b011
 * - U+2STAGE           0b100
 * - S+2STAGE           0b101
 * - S+SUM+2STAGE       0b110
 * - Shadow stack+U     0b1000
 * - Shadow stack+S     0b1001
 */
#define MMUIdx_U            0
#define MMUIdx_S            1
#define MMUIdx_S_SUM        2
#define MMUIdx_M            3
#define MMU_2STAGE_BIT      (1 << 2)
#define MMU_IDX_SS_WRITE    (1 << 3)

/* effective privilege level encoded in the low bits of an MMU index */
static inline int mmuidx_priv(int mmu_idx)
{
    int ret = mmu_idx & 3;
    if (ret == MMUIdx_S_SUM) {
        ret = PRV_S;
    }
    return ret;
}

/* true if the MMU index is S-mode with SUM (permit Supervisor User Memory access) */
static inline bool mmuidx_sum(int mmu_idx)
{
    return (mmu_idx & 3) == MMUIdx_S_SUM;
}

/* true if the MMU index selects two-stage address translation */
static inline bool mmuidx_2stage(int mmu_idx)
{
    return mmu_idx & MMU_2STAGE_BIT;
}

/*
 * share data between vector helpers and decode code
 * (NF and WD deliberately overlap: no instruction uses both fields)
 */
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
FIELD(VDATA, VTA, 4, 1)
FIELD(VDATA, VTA_ALL_1S, 5, 1)
FIELD(VDATA, VMA, 6, 1)
FIELD(VDATA, NF, 7, 4)
FIELD(VDATA, WD, 7, 1)

/* floating-point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_riscv_cpu;
#endif

enum {
    RISCV_FRM_RNE = 0,  /* Round to Nearest, ties to Even */
    RISCV_FRM_RTZ = 1,  /* Round towards Zero */
    RISCV_FRM_RDN = 2,  /* Round Down */
    RISCV_FRM_RUP = 3,  /* Round Up */
    RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
    RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
    RISCV_FRM_ROD = 8,  /* Round to Odd */
};

static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
{
    /* the value is sign-extended instead of NaN-boxed for Zfinx */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int32_t)f;
    } else {
        return f | MAKE_64BIT_MASK(32, 32);
    }
}

static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check does not apply when Zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint32_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(32, 32);

    if (likely((f & mask) == mask)) {
        return (uint32_t)f;
    } else {
        return 0x7fc00000u; /* default qnan */
    }
}

static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
{
    /* the value is sign-extended instead of NaN-boxed for Zfinx */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int16_t)f;
    } else {
        return f | MAKE_64BIT_MASK(16, 48);
    }
}
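/*
 * Worked example (illustrative sketch only, not used by the code itself):
 * with Zfinx disabled, narrower floats live NaN-boxed in the 64-bit FP
 * register file, so
 *
 *   uint64_t boxed = nanbox_s(env, 0x3f800000);     // 1.0f
 *   // boxed == 0xffffffff3f800000
 *   float32 ok  = check_nanbox_s(env, boxed);       // returns 0x3f800000
 *   float32 bad = check_nanbox_s(env, 0x3f800000);  // upper 32 bits not all
 *                                                   // ones: returns the
 *                                                   // default qNaN 0x7fc00000
 *
 * i.e. an improperly boxed input is treated as the canonical NaN, as the
 * F extension requires.
 */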
static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check does not apply when Zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint16_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(16, 48);

    if (likely((f & mask) == mask)) {
        return (uint16_t)f;
    } else {
        return 0x7E00u; /* default qnan */
    }
}

static inline float16 check_nanbox_bf16(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check does not apply when Zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint16_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(16, 48);

    if (likely((f & mask) == mask)) {
        return (uint16_t)f;
    } else {
        return 0x7FC0u; /* default qnan */
    }
}

#ifndef CONFIG_USER_ONLY
/* Our implementation of SysemuCPUOps::has_work */
bool riscv_cpu_has_work(CPUState *cs);
#endif

/* Zjpm addr masking routine */
static inline target_ulong adjust_addr_body(CPURISCVState *env,
                                            target_ulong addr,
                                            bool is_virt_addr)
{
    RISCVPmPmm pmm = PMM_FIELD_DISABLED;
    uint32_t pmlen = 0;
    bool signext = false;

    /* do nothing for rv32 mode */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return addr;
    }

    /* get the pmm field depending on whether addr is a virtual address */
    if (is_virt_addr) {
        pmm = riscv_pm_get_virt_pmm(env);
    } else {
        pmm = riscv_pm_get_pmm(env);
    }

    /* if pointer masking is disabled, return the original addr */
    if (pmm == PMM_FIELD_DISABLED) {
        return addr;
    }

    if (!is_virt_addr) {
        signext = riscv_cpu_virt_mem_enabled(env);
    }

    /*
     * Fetch pmlen before shifting, then discard the top pmlen bits by
     * shifting them out and sign/zero extending the result back.
     */
    pmlen = riscv_pm_get_pmlen(pmm);
    addr = addr << pmlen;

    if (signext) {
        addr = (target_long)addr >> pmlen;
    } else {
        addr = addr >> pmlen;
    }

    return addr;
}

static inline target_ulong adjust_addr(CPURISCVState *env,
                                       target_ulong addr)
{
    return adjust_addr_body(env, addr, false);
}

static inline target_ulong adjust_addr_virt(CPURISCVState *env,
                                            target_ulong addr)
{
    return adjust_addr_body(env, addr, true);
}

static inline int insn_len(uint16_t first_word)
{
    return (first_word & 3) == 3 ? 4 : 2;
}

#endif /* RISCV_CPU_INTERNALS_H */
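/*
 * Worked example for insn_len() (illustrative only): RISC-V keeps the two
 * low bits of every 32-bit instruction at 0b11, so the first half-word is
 * enough to determine the length. For 0x00100513 (li a0, 1) the first
 * half-word is 0x0513, whose low bits are 0b11, giving a length of 4 bytes;
 * the compressed instruction 0x4501 (c.li a0, 0) has low bits 0b01 and a
 * length of 2 bytes.
 */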