xref: /openbmc/qemu/target/riscv/pmp.c (revision bc15a8db4ff857172625176b03b03138aa7624d1)
/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Convert the PMP permissions to match the truth table in the Smepmp spec.
 */
static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}
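/*
 * The result is a 4-bit {L, R, W, X} index into the Smepmp truth table:
 * bit 3 = PMP_LOCK, bit 2 = PMP_READ, bit 1 = PMP_WRITE, bit 0 = PMP_EXEC.
 * Worked example, assuming the standard pmpcfg bit layout (R = bit 0,
 * W = bit 1, X = bit 2, L = bit 7): a locked read/write entry (cfg = 0x83)
 * maps to 0x8 | 0x4 | 0x2 = 14.
 */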

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
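/*
 * The A field occupies pmpcfg bits [4:3]; the privileged spec encodes it as
 * 0 = OFF, 1 = TOR, 2 = NA4, 3 = NAPOT, matching the PMP_AMATCH_* values
 * used below.
 */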

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Check whether a PMP is locked for writing or not.
 * (i.e. has LOCK flag and mseccfg.RLB is unset)
 */
static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
{
    return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
}

/*
 * Check whether `val` is an invalid Smepmp config value
 */
static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
{
    /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
    if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
        return 0;
    }

    /*
     * Adding a rule with executable privileges that either is M-mode-only
     * or a locked Shared-Region is not possible
     */
    switch (pmp_get_smepmp_operation(val)) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
    case 12:
    case 14:
    case 15:
        return 0;
    case 9:
    case 10:
    case 11:
    case 13:
        return 1;
    default:
        g_assert_not_reached();
    }
}
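/*
 * In {L, R, W, X} terms the rejected encodings are 9 (L|X), 11 (L|W|X) and
 * 13 (L|R|X), i.e. executable M-mode-only rules, plus 10 (L|W), which the
 * Smepmp truth table treats as a locked Shared-Region that is executable.
 */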

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds checks and relevant lock bit.
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        if (pmp_is_readonly(env, pmp_index)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpcfg write - read only\n");
        } else if (pmp_is_invalid_smepmp_cfg(env, val)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpcfg write - invalid\n");
        } else {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

void pmp_unlock_entries(CPURISCVState *env)
{
    uint32_t pmp_num = pmp_get_num_rules(env);
    int i;

    for (i = 0; i < pmp_num; i++) {
        env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
    }
}

static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}
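/*
 * Worked example: pmpaddr = 0x1 (binary ...0001) selects a 16-byte NAPOT
 * range.  a = (0x1 << 2) | 0x3 = 0b0111, so *sa = 0b0111 & 0b1000 = 0x0
 * and *ea = 0b0111 | 0b1000 = 0xF, i.e. the byte range 0x0-0xF.
 */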

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    hwaddr sa = 0u;
    hwaddr ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}
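/*
 * pmpaddr registers hold a physical address right-shifted by two (4-byte
 * granularity), hence the << 2 above.  For example, in NA4 mode a pmpaddr
 * value of 0x20000000 describes the byte range 0x80000000-0x80000003.
 */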

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}


/*
 * Public Interface
 */

/*
 * Check if the address has the required RWX privs to complete the desired
 * operation.
 * Return true on a PMP rule match or a default match.
 * Return false if no match.
 */
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    int pmp_size = 0;
    hwaddr s = 0;
    hwaddr e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = 2 << riscv_cpu_mxl(env);
        }
    } else {
        pmp_size = size;
    }
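    /*
     * In the MMU case above, -(addr | TARGET_PAGE_MASK) is the number of
     * bytes from addr to the end of its page: e.g. with 4 KiB pages and an
     * addr ending in 0x234, this yields 0x1000 - 0x234 = 0xdcc bytes.
     */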

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML Bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
                 */
                const uint8_t smepmp_operation =
                    pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);

                if (mode == PRV_M) {
                    switch (smepmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (smepmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If a matching address range was found, the protection bits
             * defined by that PMP entry must be used. We shouldn't fall back
             * to the default privileges.
             */
            return (privs & *allowed_privs) == privs;
        }
    }

    /* No rule matched */
    return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}
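/*
 * Each pmpcfg CSR packs 2 << MXL entries: 4 on RV32 and 8 on RV64, where
 * only the even-numbered pmpcfg CSRs exist.  In both cases reg_index * 4
 * is the index of the first PMP entry covered by the CSR.
 */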


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg+1 read only\n");
                return;
            }
        }

        if (!pmp_is_readonly(env, addr_index)) {
            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
                env->pmp_state.pmp[addr_index].addr_reg = val;
                pmp_update_rule_addr(env, addr_index);
                if (is_next_cfg_tor) {
                    pmp_update_rule_addr(env, addr_index + 1);
                }
                tlb_flush(env_cpu(env));
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - read only\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;
    uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
    /* Update the PMM field only if the value is valid according to Zjpm v1.0 */
    if (riscv_cpu_cfg(env)->ext_smmpm &&
        riscv_cpu_mxl(env) == MXL_RV64 &&
        get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
        mask |= MSECCFG_PMM;
    }

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->ext_smepmp) {
        /* Sticky bits */
        val |= (env->mseccfg & mask);
        if ((val ^ env->mseccfg) & mask) {
            tlb_flush(env_cpu(env));
        }
    } else {
        mask |= MSECCFG_RLB;
        val &= ~(mask);
    }

    /* M-mode forward CFI (mseccfg.MLPE) is to be enabled if the Zicfilp
       extension is implemented */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        val |= (val & MSECCFG_MLPE);
    }

    env->mseccfg = val;
}
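/*
 * Under Smepmp, mseccfg.MML and mseccfg.MMWP are sticky: ORing in the
 * previous value above means that once either bit is set it stays set until
 * reset, and a change to either one flushes the TLB because it alters the
 * default (no-match) PMP policy.
 */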

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, and this
 * may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
    hwaddr pmp_sa;
    hwaddr pmp_ea;
    hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will not
     * be split into regions with different permissions by PMP so we set the
     * size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (all or part of) the TLB page
         * really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of that region may differ from those of the
         * rest of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}