/*
 * QEMU RISC-V CPU -- internal functions and types
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H

#include "hw/registerfields.h"

/*
 * The current MMU Modes are:
 *  - U                 0b000
 *  - S                 0b001
 *  - S+SUM             0b010
 *  - M                 0b011
 *  - U+2STAGE          0b100
 *  - S+2STAGE          0b101
 *  - S+SUM+2STAGE      0b110
 */
#define MMUIdx_U            0
#define MMUIdx_S            1
#define MMUIdx_S_SUM        2
#define MMUIdx_M            3
#define MMU_2STAGE_BIT      (1 << 2)

static inline int mmuidx_priv(int mmu_idx)
{
    int ret = mmu_idx & 3;
    if (ret == MMUIdx_S_SUM) {
        ret = PRV_S;
    }
    return ret;
}

static inline bool mmuidx_sum(int mmu_idx)
{
    return (mmu_idx & 3) == MMUIdx_S_SUM;
}

static inline bool mmuidx_2stage(int mmu_idx)
{
    return mmu_idx & MMU_2STAGE_BIT;
}

/* share data between vector helpers and decode code */
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
FIELD(VDATA, VTA, 4, 1)
FIELD(VDATA, VTA_ALL_1S, 5, 1)
FIELD(VDATA, VMA, 6, 1)
FIELD(VDATA, NF, 7, 4)
FIELD(VDATA, WD, 7, 1)

/* floating-point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_riscv_cpu;
#endif

enum {
    RISCV_FRM_RNE = 0,  /* Round to Nearest, ties to Even */
    RISCV_FRM_RTZ = 1,  /* Round towards Zero */
    RISCV_FRM_RDN = 2,  /* Round Down */
    RISCV_FRM_RUP = 3,  /* Round Up */
    RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
    RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
    RISCV_FRM_ROD = 8,  /* Round to Odd */
};

static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
{
    /* for zfinx, the value is sign-extended instead of NaN-boxed */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int32_t)f;
    } else {
        return f | MAKE_64BIT_MASK(32, 32);
    }
}

static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check is disabled when zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint32_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(32, 32);

    if (likely((f & mask) == mask)) {
        return (uint32_t)f;
    } else {
        return 0x7fc00000u; /* default qnan */
    }
}
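
/*
 * A minimal sketch of the single-precision NaN-boxing round trip above.
 * It is illustrative only (hence the #if 0 guard): the function name is
 * made up, and it assumes a CPURISCVState *env whose CPU has ext_zfinx
 * disabled, plus the usual prior inclusion of "qemu/osdep.h" for g_assert().
 */
#if 0
static void nanbox_s_example(CPURISCVState *env)
{
    float32 one = 0x3f800000;               /* 1.0f in IEEE-754 binary32 */
    uint64_t boxed = nanbox_s(env, one);    /* upper 32 bits forced to 1s */

    /* A properly boxed value unboxes back to the original float32... */
    g_assert(boxed == 0xffffffff3f800000ULL);
    g_assert(check_nanbox_s(env, boxed) == one);

    /* ...while a value with a broken box decays to the default qNaN. */
    g_assert(check_nanbox_s(env, (uint64_t)one) == 0x7fc00000u);
}
#endif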

static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
{
    /* for zfinx, the value is sign-extended instead of NaN-boxed */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int16_t)f;
    } else {
        return f | MAKE_64BIT_MASK(16, 48);
    }
}

static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check is disabled when zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint16_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(16, 48);

    if (likely((f & mask) == mask)) {
        return (uint16_t)f;
    } else {
        return 0x7E00u; /* default qnan */
    }
}

/* Our implementation of CPUClass::has_work */
bool riscv_cpu_has_work(CPUState *cs);

#endif
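
/*
 * A minimal sketch of how the MMU index encoding documented at the top of
 * this header decodes through mmuidx_priv(), mmuidx_sum() and
 * mmuidx_2stage(). It is illustrative only (hence the #if 0 guard): the
 * function name is made up, and PRV_U/PRV_S/PRV_M are assumed to be the
 * usual privilege-level constants from "cpu_bits.h".
 */
#if 0
static void mmuidx_decode_example(void)
{
    /* Plain U-mode: no SUM, no two-stage translation. */
    g_assert(mmuidx_priv(MMUIdx_U) == PRV_U);
    g_assert(!mmuidx_sum(MMUIdx_U) && !mmuidx_2stage(MMUIdx_U));

    /* S+SUM still reports privilege PRV_S, with the SUM flag set. */
    g_assert(mmuidx_priv(MMUIdx_S_SUM) == PRV_S);
    g_assert(mmuidx_sum(MMUIdx_S_SUM));

    /* OR-ing in MMU_2STAGE_BIT leaves the privilege decoding unchanged. */
    g_assert(mmuidx_priv(MMUIdx_S | MMU_2STAGE_BIT) == PRV_S);
    g_assert(mmuidx_2stage(MMUIdx_S | MMU_2STAGE_BIT));
}
#endif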