/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}
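
/*
 * Illustrative note (not from the original source): these helpers are
 * targets of TCG-generated calls.  For a software interrupt such as
 * "int $0x80", the translator is expected to emit the equivalent of
 *
 *     helper_raise_interrupt(env, 0x80, next_eip_addend);
 *
 * where next_eip_addend is the distance from the trap-time env->eip to
 * the instruction following "int", so that IRET can resume after it.
 */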

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}
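
/*
 * Worked example (illustrative, not from the original source): vectors
 * 0 (#DE) and 10-13 (#TS/#NP/#SS/#GP) are "contributory".  If a #GP (13)
 * is raised while a #NP (11) is being delivered, both are contributory,
 * so the code above escalates to #DF (EXCP08_DBLE) with error code 0.
 * A further fault while delivering that #DF takes the triple-fault path:
 * SVM_EXIT_SHUTDOWN for a nested guest, otherwise a system reset.
 */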

/*
 * Signal an interrupt or exception; it is handled in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip_addend is
 * added to env->eip to form the address of the instruction AFTER the
 * interrupt instruction; it is only relevant if is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
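
/*
 * Usage sketch (illustrative, not from the original source): a TCG helper
 * that detects a fault mid-execution raises the exception with the host
 * return address from GETPC(), so guest state can be unwound first:
 *
 *     void helper_example(CPUX86State *env)    // hypothetical helper
 *     {
 *         if (fault_condition) {               // hypothetical check
 *             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
 *         }
 *     }
 *
 * The *_ra variants pass retaddr through to cpu_loop_exit_restore();
 * the plain variants pass 0 and are for use outside generated code.
 */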

#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }
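
    /*
     * Illustrative note (not from the original source): each 64-bit
     * paging level in the long-mode walk below is a 512-entry table,
     * indexed by 9 address bits:
     *
     *     entry_addr = (table & PG_ADDRESS_MASK)
     *                + (((gphys >> shift) & 0x1ff) << 3);
     *
     * with shift = 39 (PML4), 30 (PDPT), 21 (PD) and 12 (PT).  For
     * gphys = 0x40201000 the PDPT, PD and PT indexes are all 1.
     */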

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }
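
        /*
         * Worked PSE-36 example (illustrative, not from the original
         * source): for a 4MB frame at physical 0x380400000, PDE bits
         * 31:22 hold address bits 31:22 and PDE bits 20:13 hold address
         * bits 39:32.  With pde = 0x80406083, (pde & 0x1fe000) << 19
         * yields 0x300000000, so pte = 0x380406083 and masking the low
         * 22 bits recovers the frame base 0x380400000.
         */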

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}
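
/*
 * Illustrative note (not from the original source): the prot argument
 * doubles as an access-kind flag on the fault path above.  The guest
 * walker in handle_mmu_fault() passes NULL when translating its own
 * page-table loads, so a nested fault is reported as SVM_NPTEXIT_GPT,
 * while the final translation passes &prot and is reported as
 * SVM_NPTEXIT_GPA.
 */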

/* return value:
 * 0 = nothing more to do
 * 1 = generate PF fault
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;
    uint32_t pkr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }
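
            /*
             * Canonical-address example (illustrative, not from the
             * original source): without LA57, bits 63:47 must all equal
             * bit 47.  0xffff800000000000 gives sext == -1 and
             * 0x00007fffffffffff gives sext == 0, so both pass, while
             * 0x0000800000000000 gives sext == 1 and raises #GP.
             */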

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
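
    /*
     * Illustrative examples (not from the original source): with CR0.WP
     * clear, supervisor accesses get PAGE_WRITE even on read-only pages.
     * With CR4.SMEP set, a supervisor fetch from a user-accessible page
     * gets no PAGE_EXEC.  MMU_KSMAP_IDX models SMAP-checked supervisor
     * accesses, which get neither PAGE_READ nor PAGE_WRITE on user pages.
     */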

    if (!(env->hflags & HF_LMA_MASK)) {
        pkr = 0;
    } else if (ptep & PG_USER_MASK) {
        pkr = env->cr[4] & CR4_PKE_MASK ? env->pkru : 0;
    } else {
        pkr = env->cr[4] & CR4_PKS_MASK ? env->pkrs : 0;
    }
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkr_prot &= ~PAGE_WRITE;
        }

        prot &= pkr_prot;
        if ((pkr_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }
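
    /*
     * Worked example (illustrative, not from the original source): the
     * protection key sits in PTE bits 62:59 and selects a 2-bit field
     * in PKRU/PKRS.  For key 5 and pkr == 0x400, bit 10 (AD for key 5)
     * is set, so PAGE_READ and PAGE_WRITE are stripped; pkr == 0x800
     * sets only WD (bit 11), which blocks writes for user accesses, or
     * for supervisor accesses when CR0.WP is set.
     */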

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* Only set write access if the page is already dirty;
           otherwise wait for a write access to set the dirty bit. */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }
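
    /*
     * Illustrative example (not from the original source): a first read
     * through a clean, writable PTE installs a read-only TLB entry here.
     * The first write then refaults; the code above sets PG_DIRTY_MASK
     * and the new mapping includes PAGE_WRITE.
     */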

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even with 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too fast. */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* CR2 is not modified when the page fault is intercepted */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}