/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    /* mseccfg.RLB is set */
    if (MSECCFG_RLB_ISSET(env)) {
        return 0;
    }

    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds checks and checks the relevant lock bit.
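 * Returns true if the stored value changed; the caller is then responsible
 * for recounting the active rules and flushing the TLB.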
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->epmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa,
                             target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
                           target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 * Return true if a pmp rule match or default match
 * Return false if no match
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * ePMP spec: pack {L, R, W, X} into bits [3:0] of epmp_operation.
         */
        const uint8_t epmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If the mseccfg.MML bit is not set, do the pmp priv check.
                 * This will always apply to regular PMP.
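                 * Only locked entries constrain M-mode; unlocked entries
                 * restrict just S/U-mode accesses.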
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If the mseccfg.MML bit is set, do the enhanced pmp priv
                 * check
                 */
                if (mode == PRV_M) {
                    switch (epmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (epmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If a matching address range was found, the protection bits
             * defined with PMP must be used. We shouldn't fall back on
             * finding default privileges.
             */
            return (privs & *allowed_privs) == privs;
        }
    }

    /* No rule matched */
    return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
    /* 4 cfg bytes per pmpcfg CSR on RV32, 8 on RV64 */
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If the PMP permission of any address has changed, flush TLB pages. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
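         * If that next entry is locked and configured as TOR, this pmpaddr
         * forms the base of its locked range, so the write must be ignored.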
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
                env->pmp_state.pmp[addr_index].addr_reg = val;
                pmp_update_rule_addr(env, addr_index);
                if (is_next_cfg_tor) {
                    pmp_update_rule_addr(env, addr_index + 1);
                }
                tlb_flush(env_cpu(env));
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it is currently 0 and any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->epmp) {
        /* Sticky bits */
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
        if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
            tlb_flush(env_cpu(env));
        }
    } else {
        val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
    }

    env->mseccfg = val;
}

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, and
 * this may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
{
    target_ulong pmp_sa;
    target_ulong pmp_ea;
    target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will not
     * be split into regions with different permissions by PMP so we set the
     * size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (all or part of) the TLB
         * page really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of that region may differ from the other
         * regions of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}