/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}

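/*
 * For reference, each pmpcfg byte is laid out per the RISC-V privileged
 * spec, matching the PMP_* masks used throughout this file:
 *   bit 7    - L (lock)
 *   bits 4:3 - A (address matching mode: OFF/TOR/NA4/NAPOT)
 *   bit 2    - X,  bit 1 - W,  bit 0 - R
 * e.g. pmp_get_a_field(0x9f) yields 0x3, i.e. PMP_AMATCH_NAPOT.
 */
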
/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{

    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds checks and relevant lock bit.
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->epmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa,
                             target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}

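/*
 * Worked example of the decode above: a 4 KiB NAPOT region based at
 * 0x80000000 is encoded as pmpaddr = 0x200001ff.  Then
 *   a  = (0x200001ff << 2) | 0x3 = 0x800007ff,
 *   sa = a & (a + 1) = 0x80000000,
 *   ea = a | (a + 1) = 0x80000fff.
 */
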
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
                           target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules.  We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 * Return true if a pmp rule match or default match
 * Return false if no match
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    bool ret = false;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * ePMP spec.
         */
        const uint8_t epmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);

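        /*
         * i.e. epmp_operation packs the entry's {L, R, W, X} bits into
         * bits 3..0, which is the index used by the truth-table switch
         * statements below.
         */
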
        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML Bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
                 */
                if (mode == PRV_M) {
                    switch (epmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (epmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If matching address range was found, the protection bits
             * defined with PMP must be used. We shouldn't fall back on
             * finding default privileges.
             */
            ret = true;
            break;
        }
    }

    /* No rule matched */
    if (!ret) {
        ret = pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    return ret;
}

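/*
 * Each pmpcfg CSR packs one configuration byte per PMP entry: 4 entries
 * per CSR on RV32 and 8 on RV64, hence the "2 << riscv_cpu_mxl(env)"
 * bound used by the accessors below.
 */
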
/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
                env->pmp_state.pmp[addr_index].addr_reg = val;
                pmp_update_rule_addr(env, addr_index);
                if (is_next_cfg_tor) {
                    pmp_update_rule_addr(env, addr_index + 1);
                }
                tlb_flush(env_cpu(env));
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

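/*
 * mseccfg bits handled below: RLB (Rule Locking Bypass), MMWP (Machine Mode
 * Whitelist Policy) and MML (Machine Mode Lockdown).  MMWP and MML are
 * sticky: writes can set them but cannot clear them.
 */
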
/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and if any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->epmp) {
        /* Sticky bits */
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
        if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
            tlb_flush(env_cpu(env));
        }
    } else {
        val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
    }

    env->mseccfg = val;
}

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, and
 * this may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
{
    target_ulong pmp_sa;
    target_ulong pmp_ea;
    target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will not
     * be split into regions with different permissions by PMP so we set the
     * size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (all or part of) the TLB
         * page really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of the region may be different from other
         * regions of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}