/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PMP (Physical Memory Protection) is as yet unused and needs testing.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"

#define RISCV_DEBUG_PMP 0
#define PMP_DEBUG(fmt, ...)                                                    \
    do {                                                                       \
        if (RISCV_DEBUG_PMP) {                                                 \
            qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
        }                                                                      \
    } while (0)

static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index,
    uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
 * Accessor to extract the address-matching 'A' field from a cfg register
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
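/*
 * Layout of a pmpXcfg entry (RISC-V privileged spec v1.10), as assumed by
 * the accessors in this file: bit 0 is R, bit 1 is W, bit 2 is X, bits 3-4
 * hold the address-matching mode A (OFF/TOR/NA4/NAPOT) extracted by
 * pmp_get_a_field() above, and bit 7 is the lock bit tested via PMP_LOCK.
 */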
/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    /* In TOR mode, need to check the lock bit of the next pmp
     * (if there is a next)
     */
    const uint8_t a_field =
        pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
    if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
        (PMP_AMATCH_TOR == a_field)) {
        return 1;
    }

    return 0;
}

/*
 * Return the number of active rules.
 */
static inline uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Checks bounds and honours the lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, pmp_index)) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
    /*
       aaaa...aaa0   8-byte NAPOT range
       aaaa...aa01   16-byte NAPOT range
       aaaa...a011   32-byte NAPOT range
       ...
       aa01...1111   2^XLEN-byte NAPOT range
       a011...1111   2^(XLEN+1)-byte NAPOT range
       0111...1111   2^(XLEN+2)-byte NAPOT range
       1111...1111   Reserved
    */
    if (a == -1) {
        *sa = 0u;
        *ea = -1;
        return;
    } else {
        target_ulong t1 = ctz64(~a);
        target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
        target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
        *sa = base;
        *ea = base + range;
    }
}


/* Convert the cfg/addr reg values into a simple start address ('sa') and
 * end address ('ea').
 * This function is called relatively infrequently, whereas the check that
 * an address is within a pmp rule is called often, so optimise that one.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    int i;

    env->pmp_state.num_rules = 0;

    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa)
        && (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}
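/*
 * Worked example of the decoding above (illustrative only): with
 * A = NAPOT and pmpaddr = 0x9 (binary ...1001) there is one trailing set
 * bit, so pmp_decode_napot() yields sa = 0x20 and ea = 0x2f, i.e. a
 * 16-byte region at 0x20.  With A = TOR, pmpaddr[i-1] = 0x10 and
 * pmpaddr[i] = 0x20 decode to sa = 0x40 and ea = 0x7f, because pmpaddr
 * registers hold the physical address right-shifted by two.
 */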
/*
 * Public Interface
 */

/*
 * Check whether the address has the required RWX privs for the desired
 * operation.
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, target_ulong mode)
{
    int i = 0;
    int ret = -1;
    target_ulong s = 0;
    target_ulong e = 0;
    pmp_priv_t allowed_privs = 0;

    /* Shortcut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return true;
    }

    /* The v1.10 draft priv spec states there is an implicit priority
     * order from the lowest-numbered entry to the highest */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            ret = 0;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * If the PMP entry is not off and the address is in range, do the priv
         * check
         */
        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
            }

            if ((privs & allowed_privs) == privs) {
                ret = 1;
                break;
            } else {
                ret = 0;
                break;
            }
        }
    }

    /* No rule matched */
    if (ret == -1) {
        if (mode == PRV_M) {
            ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an
                      * M-Mode access, the access succeeds */
        } else {
            ret = 0; /* Other modes are not allowed to succeed if they don't
                      * match a rule, but there are rules.  We've checked for
                      * no rule earlier in this function. */
        }
    }

    return ret == 1 ? true : false;
}


/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val)
{
    int i;
    uint8_t cfg_val;

    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
        env->mhartid, reg_index, val);

    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - incorrect address\n");
        return;
    }

    /* Each pmpcfg CSR index covers four PMP entries, regardless of XLEN */
    for (i = 0; i < sizeof(target_ulong); i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;

    for (i = 0; i < sizeof(target_ulong); i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }

    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
        env->mhartid, reg_index, cfg_val);

    return cfg_val;
}
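/*
 * Example of the index mapping used above (illustrative only): on RV32 a
 * write to pmpcfg1 (reg_index 1) updates pmp4cfg..pmp7cfg, while on RV64 a
 * write to pmpcfg2 (reg_index 2) updates pmp8cfg..pmp15cfg; odd-numbered
 * pmpcfg registers do not exist on RV64 and are rejected above.
 */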
/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val)
{
    PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
        env->mhartid, addr_index, val);

    if (addr_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    if (addr_index < MAX_RISCV_PMPS) {
        PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
            env->mhartid, addr_index,
            env->pmp_state.pmp[addr_index].addr_reg);
        return env->pmp_state.pmp[addr_index].addr_reg;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
        return 0;
    }
}
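/*
 * Programming example (illustrative only, not exercised by this file): to
 * cover the 4 KiB region at 0x80000000 with a read/write NAPOT rule, a
 * guest would write pmpaddr0 with (0x80000000 >> 2) | ((0x1000 >> 3) - 1)
 * = 0x200001ff and then set pmp0cfg to
 * PMP_READ | PMP_WRITE | (PMP_AMATCH_NAPOT << 3).
 */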