
/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 * ...
 * This provides a RISC-V Physical Memory Protection implementation
 * ...
 */

#include "exec/exec-all.h"
#include "exec/page-protection.h"
In pmp_is_locked():
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {

In pmp_get_num_rules():
    return env->pmp_state.num_rules;

In pmp_read_cfg():
    return env->pmp_state.pmp[pmp_index].cfg_reg;
In pmp_write_cfg():
    if (riscv_cpu_cfg(env)->ext_smepmp) {
    ...
        qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
    } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
    ...
        env->pmp_state.pmp[pmp_index].cfg_reg = val;
    ...
                      "ignoring pmpcfg write - out of bounds\n");
In pmp_unlock_entries():
    int i;

    for (i = 0; i < pmp_num; i++) {
        env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
In pmp_decode_napot():
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     */
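A self-contained sketch of that decoding (an illustration, not the QEMU helper itself): the number of trailing 1 bits in the written pmpaddr value selects the power-of-two size, and the remaining upper bits, shifted left by two, give the base address.

#include <stdint.h>

/* Sketch only: decode a NAPOT-encoded pmpaddr value into an inclusive byte range. */
static void napot_decode(uint64_t pmpaddr, uint64_t *sa, uint64_t *ea)
{
    if (~pmpaddr == 0) {
        /* All bits set: the 2^(XLEN+2)-byte case, i.e. the whole address space. */
        *sa = 0;
        *ea = UINT64_MAX;
        return;
    }

    unsigned t = __builtin_ctzll(~pmpaddr);                     /* number of trailing 1 bits */
    uint64_t base = (pmpaddr & ~((UINT64_C(1) << t) - 1)) << 2; /* drop the 01..1 suffix, x4 */
    uint64_t len = UINT64_C(1) << (t + 3);                      /* 8, 16, 32, ... bytes */

    *sa = base;
    *ea = base + len - 1;
}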
In pmp_update_rule_addr():
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    ...
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    ...
            ea = -1;
    ...
            ea = (this_addr << 2) - 1u;
    ...
            ea = (sa + 4u) - 1u;
    ...
    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
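The pmpaddr CSRs hold a physical address shifted right by two, which is why the start/end computations above shift left by two. Worked through with illustrative values (not taken from the source):

/* Illustrative values only; pmpaddr registers hold (byte_address >> 2). */
/* TOR:  pmpaddr[i-1] = 0x1000, pmpaddr[i] = 0x2000
 *       sa = 0x1000 << 2 = 0x4000,   ea = (0x2000 << 2) - 1 = 0x7fff      */
/* NA4:  pmpaddr[i] = 0x2000
 *       sa = 0x2000 << 2 = 0x8000,   ea = sa + 4 - 1 = 0x8003             */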
In pmp_update_rule_nums():
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        ...
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        ...
            env->pmp_state.num_rules++;
In pmp_is_in_range():
    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
In pmp_hart_has_privs_default():
    /* ... so we default to deny all, even for M-mode. */
    ...
    /* ... so we can only execute code in M-mode with an applicable ... */
    ...
    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * ... or no PMP entry matches an M-Mode access, the access succeeds.
         */
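Taken together, these comments describe the fallback applied when no PMP entry matches an access. A loose sketch of that default follows; the real function also has to honor mseccfg's Smepmp bits, which these matched lines do not show, and the MMWP behaviour below is an assumption based on the Smepmp spec rather than on the excerpt.

/* Sketch only: default result when no PMP entry matches the access. */
static pmp_priv_t pmp_default_privs(bool pmp_present, bool is_m_mode, bool mmwp)
{
    if (!pmp_present || (is_m_mode && !mmwp)) {
        /* No PMP implemented, or an unmatched M-mode access: full access. */
        return PMP_READ | PMP_WRITE | PMP_EXEC;
    }
    return 0;   /* S/U mode, or M-mode under mseccfg.MMWP: deny by default */
}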
In pmp_hart_has_privs(..., target_ulong size, pmp_priv_t privs, ...):
    int i = 0;
    ...
    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes ...
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        ...
            pmp_size = size;
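    /*
     * Worked example (illustrative values, 4 KiB pages assumed, so
     * TARGET_PAGE_MASK is ~0xfff): -(addr | TARGET_PAGE_MASK) is the number of
     * bytes from addr to the end of its page. For addr = 0x80001f40:
     *   addr | TARGET_PAGE_MASK    = 0xffff...ff40   (only the page offset survives)
     *   -(addr | TARGET_PAGE_MASK) = 0xc0 = 192      (0x1000 - 0xf40 bytes to page end)
     */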
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);
        ...
                          "pmp violation - access is partially inside\n");
        ...
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        ...
                ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
                ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
                (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
                ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
        ...
            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
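The four OR-ed terms above repack the entry's L, R, W and X bits into a compact index. Assuming the usual bit assignments in this file (PMP_READ = 0x1, PMP_WRITE = 0x2, PMP_EXEC = 0x4, PMP_LOCK = 0x80), the shifts line up as follows:

/* Assumed bit values: PMP_READ 0x1, PMP_WRITE 0x2, PMP_EXEC 0x4, PMP_LOCK 0x80.
 * (LOCK >> 4) lands on bit 3, (READ << 2) on bit 2, WRITE stays on bit 1 and
 * (EXEC >> 2) on bit 0, so the result is the 4-bit value (L<<3)|(R<<2)|(W<<1)|X,
 * suitable as an index into the Smepmp permission truth table. */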
In pmpcfg_csr_write():
    int i;
    ...
    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
    ...
    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    ...
    /* If PMP permission of any addr has been changed, flush TLB pages. */
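What typically follows that comment, sketched with hedged assumptions (the calls below are not part of the matched lines): only if some cfg byte actually changed are the cached rule ranges recomputed and the TLB flushed, so that stale permissions are not served from cached translations.

    if (modified) {
        pmp_update_rule_nums(env);   /* recompute how many rules are active */
        tlb_flush(env_cpu(env));     /* drop translations cached with the old permissions */
    }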
In pmpcfg_csr_read():
    int i;
    ...
    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    ...
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);
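Each loop iteration reads one 8-bit pmpNcfg field and packs it into the CSR value, least-significant byte first. A hypothetical RV32 example (four cfg fields per pmpcfg register assumed):

/* Reading pmpcfg1 (reg_index == 1) on RV32 gathers PMP entries 4..7:
 *   cfg_val = pmp4cfg | (pmp5cfg << 8) | (pmp6cfg << 16) | (pmp7cfg << 24);   */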
In pmpaddr_csr_write():
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    ...
        uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
    ...
                          "ignoring pmpaddr write - pmpcfg + 1 locked\n");
    ...
        if (env->pmp_state.pmp[addr_index].addr_reg != val) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
    ...
                      "ignoring pmpaddr write - locked\n");
    ...
                      "ignoring pmpaddr write - out of bounds\n");
In pmpaddr_csr_read():
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    ...
                      "ignoring pmpaddr read - out of bounds\n");
In mseccfg_csr_write():
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);
    ...
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_is_locked(env, i)) {
    ...
    if (riscv_cpu_cfg(env)->ext_smepmp) {
    ...
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
        if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
    ...
    /* M-mode forward cfi to be enabled if cfi extension is implemented */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
    ...
    env->mseccfg = val;
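The two lines involving MSECCFG_MMWP and MSECCFG_MML implement the Smepmp rule that these bits are sticky: once set, software cannot clear them again. Traced with a hypothetical value:

/* Suppose env->mseccfg already has MML set and the guest writes val with MML clear.
 *   val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));   -> MML is OR-ed back into val
 *   (val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)     -> nonzero only when one of
 *       the two bits goes from 0 to 1, i.e. when the effective PMP state really changes. */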
In mseccfg_csr_read():
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
In pmp_get_tlb_size():
    /*
     * Calculate the TLB size.
     * It's possible that PMP regions only cover part of the TLB page, and
     * ...
     * translation result in the TLB since this will make the write access to
     * ...
     * To avoid this we return a size of 1 (which means no caching) if the PMP
     * region only covers part of the TLB page.
     */
    ...
    hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;
    ...
    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will not
     * ...
     * size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
    ...
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
    ...
        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;
    ...
        /*
         * Only the first PMP entry that covers (all or part of) the TLB
         * ...
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * ...
         * If it only covers part of the TLB page, set the size to 1 since
         * ...
         */
    ...
    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * ...
     */
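Condensed, the decision these comments describe looks roughly like the sketch below (variable names reused from the excerpt; this is not the literal function body):

    if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
        return TARGET_PAGE_SIZE;   /* first matching rule covers the whole page */
    } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
               (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
        return 1;                  /* rule splits the page: do not cache the translation */
    }
    /* otherwise try the next entry; with no overlap at all, TARGET_PAGE_SIZE is returned */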
    /* Convert PMP privilege to TLB page privilege. */