/*
 *  x86 exception helpers - sysemu code
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"

#define PG_ERROR_OK (-1)

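/*
 * Optional hook to translate a guest-physical address to a host-physical
 * one, used when nested paging (NPT) is active.  When no hook is given,
 * GET_HPHYS passes the guest-physical address through unchanged.
 */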
typedef hwaddr (*MMUTranslateFunc)(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                                   int *prot);

#define GET_HPHYS(cs, gpa, access_type, prot)  \
    (get_hphys_func ? get_hphys_func(cs, gpa, access_type, prot) : gpa)

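/*
 * Walk the guest page tables rooted at @cr3 for the access described by
 * @addr/@is_write1/@mmu_idx under paging mode @pg_mode.  On success,
 * return PG_ERROR_OK and fill in the translated address, the page size
 * and the page protection.  On failure, return the #PF error code.
 */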
static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_func,
                         uint64_t cr3, int is_write1, int mmu_idx, int pg_mode,
                         hwaddr *xlat, int *page_size, int *prot)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, is_write, is_user;
    uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
    uint32_t page_offset;
    uint32_t pkr;

    is_user = (mmu_idx == MMU_USER_IDX);
    is_write = is_write1 & 1;
    a20_mask = x86_get_a20_mask(env);

    if (!(pg_mode & PG_MODE_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

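    /*
     * Three walk formats are possible: 4- or 5-level long mode, 3-level
     * PAE, and 2-level legacy 32-bit paging (with optional 4MB PSE pages).
     */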
    if (pg_mode & PG_MODE_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (pg_mode & PG_MODE_LMA) {
            bool la57 = pg_mode & PG_MODE_LA57;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;

            if (la57) {
                pml5e_addr = ((cr3 & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = GET_HPHYS(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
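                /*
                 * ptep accumulates the combined permissions of the walk.
                 * The NX bit is inverted here so that ANDing the levels
                 * together yields the most restrictive NX/user/rw bits;
                 * it is flipped back at do_check_protect.
                 */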
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = cr3;
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = GET_HPHYS(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                *page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded? */
            pdpe_addr = ((cr3 & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            *page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        *page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
            *page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        *page_size = 4096;
        rsvd_mask = 0;
    }

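/*
 * Address bits below the page size must be zero in a large-page entry,
 * except for the PAT bit, so fold them into the reserved-bit check.
 */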
do_check_protect:
    rsvd_mask |= (*page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

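    /*
     * Derive the protection bits: SMAP (MMU_KSMAP_IDX) blocks kernel reads
     * of user pages, CR0.WP extends write protection to supervisor mode,
     * and SMEP blocks kernel execution of user pages.
     */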
    *prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        *prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
            *prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
        *prot |= PAGE_EXEC;
    }

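    /*
     * Protection keys: user pages are checked against PKRU, supervisor
     * pages against PKRS.  Each 4-bit key in the PTE selects two bits in
     * the register: access-disable (AD) and write-disable (WD).
     */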
    if (ptep & PG_USER_MASK) {
        pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
    } else {
        pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
    }
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
            pkr_prot &= ~PAGE_WRITE;
        }

        *prot &= pkr_prot;
        if ((pkr_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((*prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        *prot &= ~PAGE_WRITE;
    }

    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(*page_size - 1);
    page_offset = addr & (*page_size - 1);
    *xlat = GET_HPHYS(cs, pte + page_offset, is_write1, prot);
    return PG_ERROR_OK;

 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        ((pg_mode & PG_MODE_NXE) || (pg_mode & PG_MODE_SMEP))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    return error_code;
}

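/*
 * Translate a guest-physical address to a host-physical one through the
 * nested page tables.  If the nested walk faults, cpu_vmexit() raises a
 * #VMEXIT(NPF) and does not return.
 */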
hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t exit_info_1;
    int page_size;
    int next_prot;
    hwaddr hphys;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

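    /*
     * The nested walk itself passes a NULL hook, so it never recurses
     * through get_hphys; it uses MMU_USER_IDX, matching AMD's rule that
     * nested-table protections treat guest accesses as user accesses.
     */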
    exit_info_1 = mmu_translate(cs, gphys, NULL, env->nested_cr3,
                               access_type, MMU_USER_IDX, env->nested_pg_mode,
                               &hphys, &page_size, &next_prot);
    if (exit_info_1 == PG_ERROR_OK) {
        if (prot) {
            *prot &= next_prot;
        }
        return hphys;
    }

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int error_code = PG_ERROR_OK;
    int pg_mode, prot, page_size;
    int32_t a20_mask;
    hwaddr paddr;
    hwaddr vaddr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d mmu=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, mmu_idx, env->eip);
#endif


    if (!(env->cr[0] & CR0_PG_MASK)) {
        a20_mask = x86_get_a20_mask(env);
        paddr = addr & a20_mask;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            paddr = (uint32_t)paddr;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
    } else {
        pg_mode = get_pg_mode(env);
        if (pg_mode & PG_MODE_LMA) {
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }
        }

        error_code = mmu_translate(cs, addr, get_hphys, env->cr[3], is_write1,
                                   mmu_idx, pg_mode,
                                   &paddr, &page_size, &prot);
    }

    if (error_code == PG_ERROR_OK) {
        /* Even for 4MB pages, we map only one 4KB page in the TLB to
           avoid filling it too quickly */
        vaddr = addr & TARGET_PAGE_MASK;
        paddr &= TARGET_PAGE_MASK;

        assert(prot & (1 << is_write1));
        tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                                prot, mmu_idx, page_size);
        return 0;
    } else {
        if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
            /* cr2 is not modified in case of exceptions */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
        } else {
            env->cr[2] = addr;
        }
        env->error_code = error_code;
        cs->exception_index = EXCP0E_PAGE;
        return 1;
    }
}

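/*
 * TCG TLB-fill hook: on success the translation has been entered into the
 * TLB; on failure a page-fault exception is raised and this call does not
 * return (raise_exception_err_ra longjmps back to the main loop).
 */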
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
}

G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
}