/*
 * x86 exception helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}
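
/*
 * Note on check_exception(): vector 0 (#DE) and vectors 10-13 (#TS, #NP,
 * #SS, #GP) form the "contributory" class and #PF (14) is its own class,
 * following the architectural double-fault rules; any other combination
 * stays a benign exception and is delivered normally.
 */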

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction (i.e. env->eip +
 * next_eip_addend). It is only relevant if is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}

#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
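    /* not reached: cpu_vmexit() does not return */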
}

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
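            /*
             * Legacy PAE: CR3 points to a 32-byte-aligned page-directory-
             * pointer table; bits 31:30 of the linear address select one of
             * its four 8-byte entries, hence ((addr >> 27) & 0x18) below.
             */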
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;
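
    /*
     * Each level ANDed its USER/RW bits and its inverted NX bit into ptep;
     * flipping PG_NX_MASK back here means a set NX bit in ptep indicates
     * that at least one level forbids instruction fetches.
     */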
    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even if 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
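    /*
     * Stash the return address: if the walk in handle_mmu_fault() goes
     * through get_hphys() and that triggers a nested page fault, the
     * resulting cpu_vmexit() uses env->retaddr to restore the guest state.
     */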
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}