/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Accessor to extract the address-matching type ('A' field) from a cfg reg
 */
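/*
 * pmpcfg byte layout per the privileged spec: R = bit 0, W = bit 1,
 * X = bit 2, A = bits [4:3], L = bit 7.  For example, cfg = 0x98
 * (L set, A = 3) decodes to a = PMP_AMATCH_NAPOT.
 */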
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    /* mseccfg.RLB is set */
    if (MSECCFG_RLB_ISSET(env)) {
        return 0;
    }

    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART.
 * Performs bounds checks and honors the relevant lock bit.
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->ext_smepmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
            /* If !mseccfg.MML then ignore writes with encoding RW=01 */
            if ((val & PMP_WRITE) && !(val & PMP_READ) &&
                !MSECCFG_MML_ISSET(env)) {
                return false;
            }
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

void pmp_unlock_entries(CPURISCVState *env)
{
    uint32_t pmp_num = pmp_get_num_rules(env);
    int i;

    for (i = 0; i < pmp_num; i++) {
        env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
    }
}

static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
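    /*
     * Worked example: pmpaddr = 0x1 (binary ...01) encodes a 16-byte
     * range: a = (1 << 2) | 0x3 = 0b111, giving sa = a & (a + 1) = 0x0
     * and ea = a | (a + 1) = 0xf.
     */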
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    hwaddr sa = 0u;
    hwaddr ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }
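    /*
     * pmpaddr CSRs hold a physical address right-shifted by 2, so the
     * "<< 2" conversions below recover byte addresses; for TOR, the
     * previous entry's pmpaddr supplies the region base.
     */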

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
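/*
 * Precedence of the checks below: mseccfg.MMWP denies everything,
 * including M-mode; mseccfg.MML lets M-mode read and write but never
 * execute without a matching rule; otherwise plain-PMP semantics apply
 * (M-mode accesses succeed, lower modes fail whenever PMP is implemented).
 */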
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation.
 * Return true if a PMP rule matches or the default check passes;
 * return false if no match.
 */
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    int pmp_size = 0;
    hwaddr s = 0;
    hwaddr e = 0;

    /* Shortcut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }
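    /*
     * Derive the access size used for the range check: an unknown (0)
     * size widens to the rest of the page when an MMU is present,
     * otherwise to the widest native access, XLEN/8 bytes
     * (2 << mxl = 4 on RV32, 8 on RV64).
     */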
    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = 2 << riscv_cpu_mxl(env);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

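        /*
         * s and e are 0/1 flags for whether the first and last byte of
         * the access fall inside entry i, so s + e == 1 means the
         * access straddles the region boundary.
         */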
        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * Smepmp spec.
         */
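        /*
         * This packs cfg's L, R, W and X bits into a 4-bit value
         * (L = bit 3, R = bit 2, W = bit 1, X = bit 0) that indexes
         * the Smepmp truth tables in the switches below.
         */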
        const uint8_t smepmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML bit is set, do the enhanced pmp priv check
                 */
                if (mode == PRV_M) {
                    switch (smepmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (smepmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If a matching address range was found, the protection bits
             * defined with PMP must be used. We shouldn't fall back on
             * finding default privileges.
             */
            return (privs & *allowed_privs) == privs;
        }
    }

    /* No rule matched */
    return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

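    /*
     * Each pmpcfg CSR packs one configuration byte per PMP entry:
     * four entries on RV32, eight on RV64 (pmpcfg_nums above).
     */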
    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If the PMP permissions of any entry changed, flush TLB pages. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
                env->pmp_state.pmp[addr_index].addr_reg = val;
                pmp_update_rule_addr(env, addr_index);
                if (is_next_cfg_tor) {
                    pmp_update_rule_addr(env, addr_index + 1);
                }
                tlb_flush(env_cpu(env));
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}



/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and if any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->ext_smepmp) {
        /* Sticky bits */
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
        if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
            tlb_flush(env_cpu(env));
        }
    } else {
        val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
    }

    /* M-mode forward cfi to be enabled if cfi extension is implemented */
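    /*
     * Note: "val |= (val & MSECCFG_MLPE)" below is a no-op as written;
     * the apparent intent is that mseccfg.MLPE only takes effect when
     * Zicfilp is implemented.
     */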
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        val |= (val & MSECCFG_MLPE);
    }

    env->mseccfg = val;
}

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, and
 * this may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
    hwaddr pmp_sa;
    hwaddr pmp_ea;
    hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will
     * not be split into regions with different permissions by PMP so we set
     * the size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (all or part of) the TLB
         * page really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of the region may differ from the rest of
         * the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}