xref: /openbmc/qemu/target/i386/tcg/excp_helper.c (revision 835fde4a)
/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed.  It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
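/*
 * Worked example of the classification below: the contributory faults
 * are #DE (0) and #TS/#NP/#SS/#GP (10-13).  A contributory fault raised
 * while delivering another contributory fault, or any contributory
 * fault or #PF raised while delivering a #PF, escalates to #DF (8); a
 * fault raised while delivering #DF is a triple fault, which resets the
 * machine.  Benign exceptions (e.g. #DB or #BP) never escalate.
 */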
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interrupt or exception; it is processed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction.  next_eip_addend is
 * added to env->eip to form the return address, i.e. the env->eip value
 * AFTER the interrupt instruction.  It is only relevant if is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
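
/*
 * Illustrative call-site sketch (hypothetical, not part of this file):
 * a TCG helper that detects a fault mid-instruction passes GETPC() as
 * retaddr, so that cpu_loop_exit_restore() can unwind the guest state
 * back to the start of the faulting instruction before delivering #GP:
 *
 *     void helper_check_foo(CPUX86State *env, uint32_t selector)
 *     {
 *         if (selector & 0x4) {    // hypothetical consistency check
 *             raise_exception_err_ra(env, EXCP0D_GPF,
 *                                    selector & 0xfffc, GETPC());
 *         }
 *     }
 */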

#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

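    /*
     * With nested paging disabled there is no second translation stage:
     * the guest-physical address is already the address to access, and
     * the walk below is skipped entirely.
     */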
    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

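            /*
             * Each table level holds 512 eight-byte entries and thus
             * consumes 9 bits of the guest-physical address: bits 47:39
             * index the level-4 table, 38:30 the PDPT, 29:21 the PD and
             * 20:12 the PT; the "<< 3" scales an index to a byte offset.
             */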
            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
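            /*
             * (pde & 0x1fe000) selects bits 20:13; shifting left by
             * (32 - 13) == 19 moves bit 13 to bit 32 and bit 20 to bit
             * 39, forming the PSE-36 high physical-address bits.
             */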
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
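    /*
     * For 4 KB pages this adds nothing; for 2 MB and 1 GB pages it
     * marks the entry's in-page address bits (down to bit 13, sparing
     * the large-page PAT bit 12) as reserved.
     */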
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
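    /*
     * NX is tracked inverted during the walk: ptep starts out with
     * PG_NX_MASK set and each "ptep &= entry ^ PG_NX_MASK" keeps it set
     * only while every level allows execution.  Flip it back so that a
     * set NX bit again means "no execute", matching the USER and RW
     * bits, which accumulate by plain AND.
     */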
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
    uint32_t page_offset;
    target_ulong vaddr;
    uint32_t pkr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
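    /*
     * is_write1 follows the MMUAccessType encoding (0 = data load,
     * 1 = data store, 2 = instruction fetch), which lines up with the
     * PAGE_READ/PAGE_WRITE/PAGE_EXEC bit positions, so
     * "prot & (1 << is_write1)" tests exactly the permission this
     * access needs.  is_write (bit 0) is true only for data stores.
     */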
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
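            /*
             * The arithmetic shift by the topmost implemented VA bit
             * (47, or 56 with LA57) replicates that bit across sext, so
             * sext is 0 or -1 exactly for canonical addresses; anything
             * else raises #GP here rather than #PF.
             */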
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

    if (!(env->hflags & HF_LMA_MASK)) {
        pkr = 0;
    } else if (ptep & PG_USER_MASK) {
        pkr = env->cr[4] & CR4_PKE_MASK ? env->pkru : 0;
    } else {
        pkr = env->cr[4] & CR4_PKS_MASK ? env->pkrs : 0;
    }
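    /*
     * pkr holds sixteen two-bit fields, one per protection key: bit
     * 2*key is Access-Disable, bit 2*key + 1 is Write-Disable.  The
     * key itself sits in PTE bits 62:59 (PG_PKRU_MASK).
     */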
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkr_prot &= ~PAGE_WRITE;
        }

        prot &= pkr_prot;
        if ((pkr_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for a dirty access: the first write will fault again and
           set the dirty bit then */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even with 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}