/*
 * x86 exception helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/helper-tcg.h"

typedef struct TranslateParams {
    target_ulong addr;
    target_ulong cr3;
    int pg_mode;
    int mmu_idx;
    int ptw_idx;
    MMUAccessType access_type;
} TranslateParams;

typedef struct TranslateResult {
    hwaddr paddr;
    int prot;
    int page_size;
} TranslateResult;

typedef enum TranslateFaultStage2 {
    S2_NONE,
    S2_GPA,
    S2_GPT,
} TranslateFaultStage2;

typedef struct TranslateFault {
    int exception_index;
    int error_code;
    target_ulong cr2;
    TranslateFaultStage2 stage2;
} TranslateFault;

typedef struct PTETranslate {
    CPUX86State *env;
    TranslateFault *err;
    int ptw_idx;
    void *haddr;
    hwaddr gaddr;
} PTETranslate;
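
/*
 * Map the guest-physical address of a page-table entry through the
 * page-table-walk MMU index (MMU_PHYS_IDX, or MMU_NESTED_IDX when nested
 * paging is active).  On success, inout->haddr is a host pointer to the
 * entry when it is backed by RAM, or NULL otherwise.  On a nested fault,
 * the error is recorded in *inout->err and false is returned.
 */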
static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra)
{
    CPUTLBEntryFull *full;
    int flags;

    inout->gaddr = addr;
    flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE,
                              inout->ptw_idx, true, &inout->haddr, &full, ra);

    if (unlikely(flags & TLB_INVALID_MASK)) {
        TranslateFault *err = inout->err;

        assert(inout->ptw_idx == MMU_NESTED_IDX);
        *err = (TranslateFault){
            .error_code = inout->env->error_code,
            .cr2 = addr,
            .stage2 = S2_GPT,
        };
        return false;
    }
    return true;
}
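
/*
 * Load a 32-bit or 64-bit page-table entry: use the direct host mapping
 * set up by ptw_translate() when the entry lives in RAM, otherwise fall
 * back to a full load through the page-table-walk MMU index.
 */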
static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
{
    if (likely(in->haddr)) {
        return ldl_p(in->haddr);
    }
    return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}

static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
{
    if (likely(in->haddr)) {
        return ldq_p(in->haddr);
    }
    return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}

/*
 * Note that we can use a 32-bit cmpxchg for all page table entries,
 * even 64-bit ones, because PG_PRESENT_MASK, PG_ACCESSED_MASK and
 * PG_DIRTY_MASK are all in the low 32 bits.
 */
static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new)
{
    uint32_t cmp;

    /* Does x86 really perform a rmw cycle on mmio for ptw? */
    start_exclusive();
    cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0);
    if (cmp == old) {
        cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0);
    }
    end_exclusive();
    return cmp == old;
}

static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
{
    if (set & ~old) {
        uint32_t new = old | set;
        if (likely(in->haddr)) {
            old = cpu_to_le32(old);
            new = cpu_to_le32(new);
            return qatomic_cmpxchg((uint32_t *)in->haddr, old, new) == old;
        }
        return ptw_setl_slow(in, old, new);
    }
    return true;
}
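
/*
 * Walk the page tables described by *in and, on success, fill *out with
 * the physical address, protection and page size.  On failure, fill *err
 * and return false.  Accessed/dirty bits are set atomically; if the
 * cmpxchg loses a race against another vCPU, the affected level (or, for
 * the final update, the whole walk) is restarted.
 */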
static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
                          TranslateResult *out, TranslateFault *err,
                          uint64_t ra)
{
    const target_ulong addr = in->addr;
    const int pg_mode = in->pg_mode;
    const bool is_user = is_mmu_index_user(in->mmu_idx);
    const MMUAccessType access_type = in->access_type;
    uint64_t ptep, pte, rsvd_mask;
    PTETranslate pte_trans = {
        .env = env,
        .err = err,
        .ptw_idx = in->ptw_idx,
    };
    hwaddr pte_addr, paddr;
    uint32_t pkr;
    int page_size;
    int error_code;
    int prot;

 restart_all:
    rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits);
    rsvd_mask &= PG_ADDRESS_MASK;
    if (!(pg_mode & PG_MODE_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (pg_mode & PG_MODE_PAE) {
#ifdef TARGET_X86_64
        if (pg_mode & PG_MODE_LMA) {
            if (pg_mode & PG_MODE_LA57) {
                /*
                 * Page table level 5
                 */
                pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3);
                if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                    return false;
                }
            restart_5:
                pte = ptw_ldq(&pte_trans, ra);
                if (!(pte & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pte & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                    goto restart_5;
                }
                ptep = pte ^ PG_NX_MASK;
            } else {
                pte = in->cr3;
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            /*
             * Page table level 4
             */
            pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3);
            if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                return false;
            }
        restart_4:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_4;
            }
            ptep &= pte ^ PG_NX_MASK;

            /*
             * Page table level 3
             */
            pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3);
            if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                return false;
            }
        restart_3_lma:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & rsvd_mask) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_3_lma;
            }
            ptep &= pte ^ PG_NX_MASK;
            if (pte & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                goto do_check_protect;
            }
        } else
#endif
        {
            /*
             * Page table level 3
             */
            pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18);
            if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                return false;
            }
            rsvd_mask |= PG_HI_USER_MASK;
        restart_3_nolma:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_3_nolma;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }
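
        /*
         * Levels 2 and 1 below are shared by the 4/5-level long-mode and
         * legacy PAE walks; ptep carries the protection bits accumulated
         * from the levels above.
         */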
        /*
         * Page table level 2
         */
        pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
    restart_2_pae:
        pte = ptw_ldq(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        if (pte & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep &= pte ^ PG_NX_MASK;
            goto do_check_protect;
        }
        if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
            goto restart_2_pae;
        }
        ptep &= pte ^ PG_NX_MASK;

        /*
         * Page table level 1
         */
        pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
        pte = ptw_ldq(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else if (pg_mode) {
        /*
         * Page table level 2
         */
        pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
    restart_2_nopae:
        pte = ptw_ldl(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pte | PG_NX_MASK;
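
        /*
         * Legacy 32-bit entries have no NX bit.  Setting PG_NX_MASK in
         * ptep here means that after the final "ptep ^= PG_NX_MASK" in
         * do_check_protect the NX bit reads as clear, i.e. the non-PAE
         * formats never deny PAGE_EXEC on their own (SMEP still can).
         */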

        /* if PSE bit is set, then we use a 4MB page */
        if ((pte & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
            page_size = 4096 * 1024;
            /*
             * Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = (uint32_t)pte | ((pte & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }
        if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
            goto restart_2_nopae;
        }

        /*
         * Page table level 1
         */
        pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
        pte = ptw_ldl(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    } else {
        /*
         * No paging (real mode), let's tentatively resolve the address as 1:1
         * here, but conditionally still perform an NPT walk on it later.
         */
        page_size = 0x40000000;
        paddr = in->addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto stage2;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (is_user ||
         !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

    if (ptep & PG_USER_MASK) {
        pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
    } else {
        pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
    }
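
    /*
     * pkr (PKRU for user pages, PKRS for supervisor pages) holds sixteen
     * two-bit fields: bit 2*key is Access-Disable and bit 2*key+1 is
     * Write-Disable for that protection key.
     */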
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
            pkr_prot &= ~PAGE_WRITE;
        }
        if ((pkr_prot & (1 << access_type)) == 0) {
            goto do_fault_pk_protect;
        }
        prot &= pkr_prot;
    }

    if ((prot & (1 << access_type)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    {
        uint32_t set = PG_ACCESSED_MASK;
        if (access_type == MMU_DATA_STORE) {
            set |= PG_DIRTY_MASK;
        } else if (!(pte & PG_DIRTY_MASK)) {
            /*
             * Only set write access if already dirty...
             * otherwise wait for dirty access.
             */
            prot &= ~PAGE_WRITE;
        }
        if (!ptw_setl(&pte_trans, pte, set)) {
            /*
             * We can arrive here from any of 3 levels and 2 formats.
             * The only safe thing is to restart the entire lookup.
             */
            goto restart_all;
        }
    }

    /* merge offset within page */
    paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
 stage2:

    /*
     * Note that NPT is walked (for both paging structures and final guest
     * addresses) using the address with the A20 bit set.
     */
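    /*
     * With nested paging, the guest-physical address computed above must
     * itself be translated through the hypervisor's nested page tables;
     * the stage-2 result below is intersected with the stage-1 protection.
     */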
    if (in->ptw_idx == MMU_NESTED_IDX) {
        CPUTLBEntryFull *full;
        int flags, nested_page_size;

        flags = probe_access_full(env, paddr, 0, access_type,
                                  MMU_NESTED_IDX, true,
                                  &pte_trans.haddr, &full, 0);
        if (unlikely(flags & TLB_INVALID_MASK)) {
            *err = (TranslateFault){
                .error_code = env->error_code,
                .cr2 = paddr,
                .stage2 = S2_GPA,
            };
            return false;
        }

        /* Merge stage1 & stage2 protection bits. */
        prot &= full->prot;

        /* Re-verify resulting protection. */
        if ((prot & (1 << access_type)) == 0) {
            goto do_fault_protect;
        }

        /* Merge stage1 & stage2 addresses to final physical address. */
        nested_page_size = 1 << full->lg_page_size;
        paddr = (full->phys_addr & ~(nested_page_size - 1))
              | (paddr & (nested_page_size - 1));

        /*
         * Use the larger of stage1 & stage2 page sizes, so that
         * invalidation works.
         */
        if (nested_page_size > page_size) {
            page_size = nested_page_size;
        }
    }

    out->paddr = paddr & x86_get_a20_mask(env);
    out->prot = prot;
    out->page_size = page_size;
    return true;

 do_fault_rsvd:
    error_code = PG_ERROR_RSVD_MASK;
    goto do_fault_cont;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
    goto do_fault_cont;
 do_fault_pk_protect:
    assert(access_type != MMU_INST_FETCH);
    error_code = PG_ERROR_PK_MASK | PG_ERROR_P_MASK;
    goto do_fault_cont;
 do_fault:
    error_code = 0;
 do_fault_cont:
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    switch (access_type) {
    case MMU_DATA_LOAD:
        break;
    case MMU_DATA_STORE:
        error_code |= PG_ERROR_W_MASK;
        break;
    case MMU_INST_FETCH:
        if (pg_mode & (PG_MODE_NXE | PG_MODE_SMEP)) {
            error_code |= PG_ERROR_I_D_MASK;
        }
        break;
    }
    *err = (TranslateFault){
        .exception_index = EXCP0E_PAGE,
        .error_code = error_code,
        .cr2 = addr,
    };
    return false;
}
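
/*
 * A fault during the nested (stage-2) translation is reported to the
 * hypervisor as a #VMEXIT(NPF) rather than as a guest #PF.  exit_info_1
 * distinguishes a miss while walking the guest page tables (GPT) from a
 * miss on the final guest-physical access (GPA).
 */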
static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err,
                                    uintptr_t retaddr)
{
    uint64_t exit_info_1 = err->error_code;

    switch (err->stage2) {
    case S2_GPT:
        exit_info_1 |= SVM_NPTEXIT_GPT;
        break;
    case S2_GPA:
        exit_info_1 |= SVM_NPTEXIT_GPA;
        break;
    default:
        g_assert_not_reached();
    }

    x86_stq_phys(env_cpu(env),
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 err->cr2);
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr);
}
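
/*
 * Decide how the access for @mmu_idx is translated: MMU_PHYS_IDX (and
 * regular accesses with paging disabled and no NPT) pass through 1:1
 * apart from A20 masking, MMU_NESTED_IDX walks the nested page tables,
 * and everything else walks the guest page tables after rejecting
 * non-canonical long-mode addresses with #GP.
 */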
static bool get_physical_address(CPUX86State *env, vaddr addr,
                                 MMUAccessType access_type, int mmu_idx,
                                 TranslateResult *out, TranslateFault *err,
                                 uint64_t ra)
{
    TranslateParams in;
    bool use_stage2 = env->hflags2 & HF2_NPT_MASK;

    in.addr = addr;
    in.access_type = access_type;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        break;

    case MMU_NESTED_IDX:
        if (likely(use_stage2)) {
            in.cr3 = env->nested_cr3;
            in.pg_mode = env->nested_pg_mode;
            in.mmu_idx =
                env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
            in.ptw_idx = MMU_PHYS_IDX;

            if (!mmu_translate(env, &in, out, err, ra)) {
                err->stage2 = S2_GPA;
                return false;
            }
            return true;
        }
        break;

    default:
        if (is_mmu_index_32(mmu_idx)) {
            addr = (uint32_t)addr;
        }

        if (likely(env->cr[0] & CR0_PG_MASK || use_stage2)) {
            in.cr3 = env->cr[3];
            in.mmu_idx = mmu_idx;
            in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX;
            in.pg_mode = get_pg_mode(env);

            if (in.pg_mode & PG_MODE_LMA) {
                /* test virtual address sign extension */
                int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47;
                int64_t sext = (int64_t)addr >> shift;
                if (sext != 0 && sext != -1) {
                    *err = (TranslateFault){
                        .exception_index = EXCP0D_GPF,
                        .cr2 = addr,
                    };
                    return false;
                }
            }
            return mmu_translate(env, &in, out, err, ra);
        }
        break;
    }

    /* No translation needed. */
    out->paddr = addr & x86_get_a20_mask(env);
    out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    out->page_size = TARGET_PAGE_SIZE;
    return true;
}
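
/*
 * TCG slow-path TLB fill.  Translate @addr; on success install the page
 * in the softmmu TLB, otherwise either report the failure to a probing
 * caller or deliver the architectural fault (a #VMEXIT(NPF) for stage-2
 * faults, else #PF/#GP) at @retaddr.
 */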
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    CPUX86State *env = cpu_env(cs);
    TranslateResult out;
    TranslateFault err;

    if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err,
                             retaddr)) {
        /*
         * Even if 4MB pages, we map only one 4KB page in the cache to
         * avoid filling it too fast.
         */
        assert(out.prot & (1 << access_type));
        tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                                out.paddr & TARGET_PAGE_MASK,
                                cpu_get_mem_attrs(env),
                                out.prot, mmu_idx, out.page_size);
        return true;
    }

    if (probe) {
        /* This will be used if recursing for stage2 translation. */
        env->error_code = err.error_code;
        return false;
    }

    if (err.stage2 != S2_NONE) {
        raise_stage2(env, &err, retaddr);
    }

    if (env->intercept_exceptions & (1 << err.exception_index)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs, env->vm_vmcb +
                     offsetof(struct vmcb, control.exit_info_2),
                     err.cr2);
    } else {
        env->cr[2] = err.cr2;
    }
    raise_exception_err_ra(env, err.exception_index, err.error_code, retaddr);
}

G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
}